Repository: 0ssamaak0/DLTA-AI Branch: master Commit: 9296b693d28c Files: 1415 Total size: 5.9 MB Directory structure: gitextract_auoxlo5f/ ├── .github/ │ └── workflows/ │ └── retype-action.yml ├── .gitignore ├── DLTA_AI_app/ │ ├── .flake8 │ ├── .gitignore │ ├── .gitmodules │ ├── __main__.py │ ├── __main__.spec │ ├── inferencing.py │ ├── labelme/ │ │ ├── __init__.py │ │ ├── app.py │ │ ├── cli/ │ │ │ ├── __init__.py │ │ │ ├── draw_json.py │ │ │ ├── draw_label_png.py │ │ │ ├── json_to_dataset.py │ │ │ └── on_docker.py │ │ ├── config/ │ │ │ ├── __init__.py │ │ │ ├── default_config.yaml │ │ │ └── default_config_base.yaml │ │ ├── intelligence.py │ │ ├── label_file.py │ │ ├── logger.py │ │ ├── shape.py │ │ ├── testing.py │ │ ├── utils/ │ │ │ ├── __init__.py │ │ │ ├── _io.py │ │ │ ├── custom_exports.py │ │ │ ├── export.py │ │ │ ├── helpers/ │ │ │ │ ├── mathOps.py │ │ │ │ └── visualizations.py │ │ │ ├── image.py │ │ │ ├── model_explorer.py │ │ │ ├── qt.py │ │ │ ├── sam.py │ │ │ ├── shape.py │ │ │ └── vid_to_frames.py │ │ └── widgets/ │ │ ├── ClassesWidget.py │ │ ├── MsgBox.py │ │ ├── ThresholdWidget.py │ │ ├── __init__.py │ │ ├── brightness_contrast_dialog.py │ │ ├── canvas.py │ │ ├── check_updates_UI.py │ │ ├── color_dialog.py │ │ ├── deleteSelectedShape_UI.py │ │ ├── editLabel_videoMode.py │ │ ├── escapable_qlist_widget.py │ │ ├── exportData_UI.py │ │ ├── feedback_UI.py │ │ ├── getIDfromUser_UI.py │ │ ├── interpolation_UI.py │ │ ├── label_dialog.py │ │ ├── label_list_widget.py │ │ ├── links.py │ │ ├── merge_feature_UI.py │ │ ├── notification.py │ │ ├── open_file.py │ │ ├── preferences_UI.py │ │ ├── runtime_data_UI.py │ │ ├── scaleObject_UI.py │ │ ├── segmentation_options_UI.py │ │ ├── shortcut_selector_UI.py │ │ ├── tool_bar.py │ │ ├── unique_label_qlist_widget.py │ │ └── zoom_widget.py │ ├── mmdetection/ │ │ ├── .circleci/ │ │ │ └── config.yml │ │ ├── .dev_scripts/ │ │ │ ├── batch_test_list.py │ │ │ ├── batch_train_list.txt │ │ │ ├── benchmark_filter.py │ │ │ ├── benchmark_inference_fps.py │ │ │ ├── benchmark_test_image.py │ │ │ ├── check_links.py │ │ │ ├── convert_test_benchmark_script.py │ │ │ ├── convert_train_benchmark_script.py │ │ │ ├── gather_models.py │ │ │ ├── gather_test_benchmark_metric.py │ │ │ ├── gather_train_benchmark_metric.py │ │ │ ├── linter.sh │ │ │ ├── test_benchmark.sh │ │ │ ├── test_init_backbone.py │ │ │ └── train_benchmark.sh │ │ ├── .gitignore │ │ ├── .owners.yml │ │ ├── .pre-commit-config.yaml │ │ ├── .readthedocs.yml │ │ ├── CITATION.cff │ │ ├── LICENSE │ │ ├── MANIFEST.in │ │ ├── configs/ │ │ │ ├── _base_/ │ │ │ │ ├── datasets/ │ │ │ │ │ ├── cityscapes_detection.py │ │ │ │ │ ├── cityscapes_instance.py │ │ │ │ │ ├── coco_detection.py │ │ │ │ │ ├── coco_instance.py │ │ │ │ │ ├── coco_instance_semantic.py │ │ │ │ │ ├── coco_panoptic.py │ │ │ │ │ ├── deepfashion.py │ │ │ │ │ ├── lvis_v0.5_instance.py │ │ │ │ │ ├── lvis_v1_instance.py │ │ │ │ │ ├── openimages_detection.py │ │ │ │ │ ├── voc0712.py │ │ │ │ │ └── wider_face.py │ │ │ │ ├── default_runtime.py │ │ │ │ ├── models/ │ │ │ │ │ ├── cascade_mask_rcnn_r50_fpn.py │ │ │ │ │ ├── cascade_rcnn_r50_fpn.py │ │ │ │ │ ├── fast_rcnn_r50_fpn.py │ │ │ │ │ ├── faster_rcnn_r50_caffe_c4.py │ │ │ │ │ ├── faster_rcnn_r50_caffe_dc5.py │ │ │ │ │ ├── faster_rcnn_r50_fpn.py │ │ │ │ │ ├── mask_rcnn_r50_caffe_c4.py │ │ │ │ │ ├── mask_rcnn_r50_fpn.py │ │ │ │ │ ├── retinanet_r50_fpn.py │ │ │ │ │ ├── rpn_r50_caffe_c4.py │ │ │ │ │ ├── rpn_r50_fpn.py │ │ │ │ │ └── ssd300.py │ │ │ │ └── schedules/ │ │ │ │ ├── schedule_1x.py │ │ │ │ ├── 
schedule_20e.py │ │ │ │ └── schedule_2x.py │ │ │ ├── albu_example/ │ │ │ │ └── mask_rcnn_r50_fpn_albu_1x_coco.py │ │ │ ├── atss/ │ │ │ │ ├── atss_r101_fpn_1x_coco.py │ │ │ │ ├── atss_r50_fpn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── autoassign/ │ │ │ │ ├── autoassign_r50_fpn_8x2_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── carafe/ │ │ │ │ ├── faster_rcnn_r50_fpn_carafe_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_carafe_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── cascade_rcnn/ │ │ │ │ ├── cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_20e_coco.py │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_r50_fpn_20e_coco.py │ │ │ │ ├── cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_rcnn_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── cascade_rcnn_r101_fpn_1x_coco.py │ │ │ │ ├── cascade_rcnn_r101_fpn_20e_coco.py │ │ │ │ ├── cascade_rcnn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── cascade_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── cascade_rcnn_r50_fpn_20e_coco.py │ │ │ │ ├── cascade_rcnn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── cascade_rcnn_x101_32x4d_fpn_20e_coco.py │ │ │ │ ├── cascade_rcnn_x101_64x4d_fpn_1x_coco.py │ │ │ │ ├── cascade_rcnn_x101_64x4d_fpn_20e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── cascade_rpn/ │ │ │ │ ├── crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── crpn_r50_caffe_fpn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── centernet/ │ │ │ │ ├── centernet_resnet18_140e_coco.py │ │ │ │ ├── centernet_resnet18_dcnv2_140e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── centripetalnet/ │ │ │ │ ├── centripetalnet_hourglass104_mstest_16x6_210e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── cityscapes/ │ │ │ │ ├── faster_rcnn_r50_fpn_1x_cityscapes.py │ │ │ │ └── mask_rcnn_r50_fpn_1x_cityscapes.py │ │ │ ├── common/ │ │ │ │ ├── lsj_100e_coco_instance.py │ │ │ │ ├── mstrain-poly_3x_coco_instance.py │ │ │ │ ├── mstrain_3x_coco.py │ │ │ │ ├── mstrain_3x_coco_instance.py │ │ │ │ ├── ssj_270k_coco_instance.py │ │ │ │ └── ssj_scp_270k_coco_instance.py │ │ │ ├── convnext/ │ │ │ │ ├── cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py │ │ │ │ ├── mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── cornernet/ │ │ │ │ ├── cornernet_hourglass104_mstest_10x5_210e_coco.py │ │ │ │ ├── cornernet_hourglass104_mstest_32x3_210e_coco.py │ │ │ │ ├── cornernet_hourglass104_mstest_8x6_210e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── dcn/ │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── 
cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_dpool_1x_coco.py │ │ │ │ ├── faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── dcnv2/ │ │ │ │ ├── faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_mdpool_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── ddod/ │ │ │ │ ├── ddod_r50_fpn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── deepfashion/ │ │ │ │ └── mask_rcnn_r50_fpn_15e_deepfashion.py │ │ │ ├── deformable_detr/ │ │ │ │ ├── deformable_detr_r50_16x2_50e_coco.py │ │ │ │ ├── deformable_detr_refine_r50_16x2_50e_coco.py │ │ │ │ ├── deformable_detr_twostage_refine_r50_16x2_50e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── detectors/ │ │ │ │ ├── cascade_rcnn_r50_rfp_1x_coco.py │ │ │ │ ├── cascade_rcnn_r50_sac_1x_coco.py │ │ │ │ ├── detectors_cascade_rcnn_r50_1x_coco.py │ │ │ │ ├── detectors_htc_r101_20e_coco.py │ │ │ │ ├── detectors_htc_r50_1x_coco.py │ │ │ │ ├── htc_r50_rfp_1x_coco.py │ │ │ │ ├── htc_r50_sac_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── detr/ │ │ │ │ ├── detr_r50_8x2_150e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── double_heads/ │ │ │ │ ├── dh_faster_rcnn_r50_fpn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── dyhead/ │ │ │ │ ├── atss_r50_caffe_fpn_dyhead_1x_coco.py │ │ │ │ ├── atss_r50_fpn_dyhead_1x_coco.py │ │ │ │ ├── atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── dynamic_rcnn/ │ │ │ │ ├── dynamic_rcnn_r50_fpn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── efficientnet/ │ │ │ │ ├── metafile.yml │ │ │ │ └── retinanet_effb3_fpn_crop896_8x4_1x_coco.py │ │ │ ├── empirical_attention/ │ │ │ │ ├── faster_rcnn_r50_fpn_attention_0010_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_attention_1111_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── fast_rcnn/ │ │ │ │ ├── fast_rcnn_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── fast_rcnn_r101_fpn_1x_coco.py │ │ │ │ ├── fast_rcnn_r101_fpn_2x_coco.py │ │ │ │ ├── fast_rcnn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── fast_rcnn_r50_fpn_1x_coco.py │ │ │ │ └── fast_rcnn_r50_fpn_2x_coco.py │ │ │ ├── faster_rcnn/ │ │ │ │ ├── faster_rcnn_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_r101_fpn_1x_coco.py │ │ │ │ ├── faster_rcnn_r101_fpn_2x_coco.py │ │ │ │ ├── faster_rcnn_r101_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_c4_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_dc5_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_fpn_90k_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py │ │ │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py │ │ │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py │ │ │ │ ├── 
faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_90k_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_2x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_bounded_iou_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_ciou_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_fp16_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_giou_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_iou_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_ohem_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_soft_nms_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py │ │ │ │ ├── faster_rcnn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── faster_rcnn_x101_32x4d_fpn_2x_coco.py │ │ │ │ ├── faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_x101_64x4d_fpn_1x_coco.py │ │ │ │ ├── faster_rcnn_x101_64x4d_fpn_2x_coco.py │ │ │ │ ├── faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── fcos/ │ │ │ │ ├── fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py │ │ │ │ ├── fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py │ │ │ │ ├── fcos_center_r50_caffe_fpn_gn-head_1x_coco.py │ │ │ │ ├── fcos_r101_caffe_fpn_gn-head_1x_coco.py │ │ │ │ ├── fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py │ │ │ │ ├── fcos_r50_caffe_fpn_gn-head_1x_coco.py │ │ │ │ ├── fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ │ │ │ ├── fcos_r50_caffe_fpn_gn-head_fp16_1x_bs8x8_coco.py │ │ │ │ ├── fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py │ │ │ │ ├── fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── foveabox/ │ │ │ │ ├── fovea_align_r101_fpn_gn-head_4x4_2x_coco.py │ │ │ │ ├── fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ │ │ ├── fovea_align_r50_fpn_gn-head_4x4_2x_coco.py │ │ │ │ ├── fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ │ │ ├── fovea_r101_fpn_4x4_1x_coco.py │ │ │ │ ├── fovea_r101_fpn_4x4_2x_coco.py │ │ │ │ ├── fovea_r50_fpn_4x4_1x_coco.py │ │ │ │ ├── fovea_r50_fpn_4x4_2x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── fpg/ │ │ │ │ ├── faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py │ │ │ │ ├── faster_rcnn_r50_fpg_crop640_50e_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_crop640_50e_coco.py │ │ │ │ ├── mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py │ │ │ │ ├── mask_rcnn_r50_fpg_crop640_50e_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_crop640_50e_coco.py │ │ │ │ ├── metafile.yml │ │ │ │ ├── retinanet_r50_fpg-chn128_crop640_50e_coco.py │ │ │ │ └── retinanet_r50_fpg_crop640_50e_coco.py │ │ │ ├── free_anchor/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── retinanet_free_anchor_r101_fpn_1x_coco.py │ │ │ │ ├── retinanet_free_anchor_r50_fpn_1x_coco.py │ │ │ │ └── retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py │ │ │ ├── fsaf/ │ │ │ │ ├── fsaf_r101_fpn_1x_coco.py │ │ │ │ ├── fsaf_r50_fpn_1x_coco.py │ │ │ │ ├── fsaf_x101_64x4d_fpn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── gcnet/ │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ │ │ │ ├── 
cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── gfl/ │ │ │ │ ├── gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py │ │ │ │ ├── gfl_r101_fpn_mstrain_2x_coco.py │ │ │ │ ├── gfl_r50_fpn_1x_coco.py │ │ │ │ ├── gfl_r50_fpn_mstrain_2x_coco.py │ │ │ │ ├── gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py │ │ │ │ ├── gfl_x101_32x4d_fpn_mstrain_2x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── ghm/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── retinanet_ghm_r101_fpn_1x_coco.py │ │ │ │ ├── retinanet_ghm_r50_fpn_1x_coco.py │ │ │ │ ├── retinanet_ghm_x101_32x4d_fpn_1x_coco.py │ │ │ │ └── retinanet_ghm_x101_64x4d_fpn_1x_coco.py │ │ │ ├── gn/ │ │ │ │ ├── mask_rcnn_r101_fpn_gn-all_2x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_gn-all_3x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_gn-all_2x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_gn-all_3x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── gn+ws/ │ │ │ │ ├── faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py │ │ │ │ ├── faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py │ │ │ │ ├── faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py │ │ │ │ ├── mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py │ │ │ │ ├── mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── grid_rcnn/ │ │ │ │ ├── grid_rcnn_r101_fpn_gn-head_2x_coco.py │ │ │ │ ├── grid_rcnn_r50_fpn_gn-head_1x_coco.py │ │ │ │ ├── grid_rcnn_r50_fpn_gn-head_2x_coco.py │ │ │ │ ├── grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py │ │ │ │ ├── grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── groie/ │ │ │ │ ├── faster_rcnn_r50_fpn_groie_1x_coco.py │ │ │ │ ├── grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_groie_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── guided_anchoring/ │ │ │ │ ├── ga_fast_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── ga_faster_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── ga_faster_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── ga_faster_r50_fpn_1x_coco.py │ │ │ │ ├── ga_faster_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── ga_faster_x101_64x4d_fpn_1x_coco.py │ │ │ │ ├── 
ga_retinanet_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── ga_retinanet_r101_caffe_fpn_mstrain_2x.py │ │ │ │ ├── ga_retinanet_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── ga_retinanet_r50_fpn_1x_coco.py │ │ │ │ ├── ga_retinanet_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── ga_retinanet_x101_64x4d_fpn_1x_coco.py │ │ │ │ ├── ga_rpn_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── ga_rpn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── ga_rpn_r50_fpn_1x_coco.py │ │ │ │ ├── ga_rpn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── ga_rpn_x101_64x4d_fpn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── hrnet/ │ │ │ │ ├── cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py │ │ │ │ ├── cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py │ │ │ │ ├── cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py │ │ │ │ ├── cascade_rcnn_hrnetv2p_w18_20e_coco.py │ │ │ │ ├── cascade_rcnn_hrnetv2p_w32_20e_coco.py │ │ │ │ ├── cascade_rcnn_hrnetv2p_w40_20e_coco.py │ │ │ │ ├── faster_rcnn_hrnetv2p_w18_1x_coco.py │ │ │ │ ├── faster_rcnn_hrnetv2p_w18_2x_coco.py │ │ │ │ ├── faster_rcnn_hrnetv2p_w32_1x_coco.py │ │ │ │ ├── faster_rcnn_hrnetv2p_w32_2x_coco.py │ │ │ │ ├── faster_rcnn_hrnetv2p_w40_1x_coco.py │ │ │ │ ├── faster_rcnn_hrnetv2p_w40_2x_coco.py │ │ │ │ ├── fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py │ │ │ │ ├── fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py │ │ │ │ ├── fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ │ │ ├── fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py │ │ │ │ ├── fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py │ │ │ │ ├── fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ │ │ ├── fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ │ │ ├── htc_hrnetv2p_w18_20e_coco.py │ │ │ │ ├── htc_hrnetv2p_w32_20e_coco.py │ │ │ │ ├── htc_hrnetv2p_w40_20e_coco.py │ │ │ │ ├── htc_hrnetv2p_w40_28e_coco.py │ │ │ │ ├── htc_x101_64x4d_fpn_16x1_28e_coco.py │ │ │ │ ├── mask_rcnn_hrnetv2p_w18_1x_coco.py │ │ │ │ ├── mask_rcnn_hrnetv2p_w18_2x_coco.py │ │ │ │ ├── mask_rcnn_hrnetv2p_w32_1x_coco.py │ │ │ │ ├── mask_rcnn_hrnetv2p_w32_2x_coco.py │ │ │ │ ├── mask_rcnn_hrnetv2p_w40_1x_coco.py │ │ │ │ ├── mask_rcnn_hrnetv2p_w40_2x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── htc/ │ │ │ │ ├── htc_r101_fpn_20e_coco.py │ │ │ │ ├── htc_r50_fpn_1x_coco.py │ │ │ │ ├── htc_r50_fpn_20e_coco.py │ │ │ │ ├── htc_without_semantic_r50_fpn_1x_coco.py │ │ │ │ ├── htc_x101_32x4d_fpn_16x1_20e_coco.py │ │ │ │ ├── htc_x101_64x4d_fpn_16x1_20e_coco.py │ │ │ │ ├── htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── instaboost/ │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py │ │ │ │ ├── cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_instaboost_4x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_instaboost_4x_coco.py │ │ │ │ ├── mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── lad/ │ │ │ │ ├── lad_r101_paa_r50_fpn_coco_1x.py │ │ │ │ ├── lad_r50_paa_r101_fpn_coco_1x.py │ │ │ │ └── metafile.yml │ │ │ ├── ld/ │ │ │ │ ├── ld_r101_gflv1_r101dcn_fpn_coco_2x.py │ │ │ │ ├── ld_r18_gflv1_r101_fpn_coco_1x.py │ │ │ │ ├── ld_r34_gflv1_r101_fpn_coco_1x.py │ │ │ │ ├── ld_r50_gflv1_r101_fpn_coco_1x.py │ │ │ │ └── metafile.yml │ │ │ ├── legacy_1.x/ │ │ │ │ ├── cascade_mask_rcnn_r50_fpn_1x_coco_v1.py │ │ │ │ ├── faster_rcnn_r50_fpn_1x_coco_v1.py │ │ │ │ ├── mask_rcnn_r50_fpn_1x_coco_v1.py │ │ │ │ ├── retinanet_r50_caffe_fpn_1x_coco_v1.py │ │ │ │ ├── retinanet_r50_fpn_1x_coco_v1.py │ │ │ │ └── ssd300_coco_v1.py │ │ │ ├── libra_rcnn/ │ │ │ │ ├── 
libra_fast_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── libra_faster_rcnn_r101_fpn_1x_coco.py │ │ │ │ ├── libra_faster_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py │ │ │ │ ├── libra_retinanet_r50_fpn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── lvis/ │ │ │ │ ├── mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py │ │ │ │ ├── mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py │ │ │ │ ├── mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py │ │ │ │ └── mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py │ │ │ ├── mask2former/ │ │ │ │ ├── mask2former_r101_lsj_8x2_50e_coco-panoptic.py │ │ │ │ ├── mask2former_r101_lsj_8x2_50e_coco.py │ │ │ │ ├── mask2former_r50_lsj_8x2_50e_coco-panoptic.py │ │ │ │ ├── mask2former_r50_lsj_8x2_50e_coco.py │ │ │ │ ├── mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py │ │ │ │ ├── mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py │ │ │ │ ├── mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py │ │ │ │ ├── mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py │ │ │ │ ├── mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py │ │ │ │ ├── mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py │ │ │ │ ├── mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── mask_rcnn/ │ │ │ │ ├── mask_rcnn_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_2x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_c4_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py │ │ │ │ ├── mask_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_1x_wandb_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_2x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_fp16_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_poly_1x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_2x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x8d_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_x101_64x4d_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_x101_64x4d_fpn_2x_coco.py │ │ │ │ ├── mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── maskformer/ │ │ │ │ ├── maskformer_r50_mstrain_16x1_75e_coco.py │ │ │ │ ├── maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── ms_rcnn/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── ms_rcnn_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── ms_rcnn_r101_caffe_fpn_2x_coco.py │ │ │ │ ├── ms_rcnn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── ms_rcnn_r50_caffe_fpn_2x_coco.py │ │ │ │ ├── ms_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── ms_rcnn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── ms_rcnn_x101_64x4d_fpn_1x_coco.py │ │ │ │ └── 
ms_rcnn_x101_64x4d_fpn_2x_coco.py │ │ │ ├── nas_fcos/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ │ │ │ └── nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ │ │ ├── nas_fpn/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── retinanet_r50_fpn_crop640_50e_coco.py │ │ │ │ └── retinanet_r50_nasfpn_crop640_50e_coco.py │ │ │ ├── openimages/ │ │ │ │ ├── faster_rcnn_r50_fpn_32x2_1x_openimages.py │ │ │ │ ├── faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py │ │ │ │ ├── faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py │ │ │ │ ├── faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py │ │ │ │ ├── metafile.yml │ │ │ │ ├── retinanet_r50_fpn_32x2_1x_openimages.py │ │ │ │ └── ssd300_32x8_36e_openimages.py │ │ │ ├── paa/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── paa_r101_fpn_1x_coco.py │ │ │ │ ├── paa_r101_fpn_2x_coco.py │ │ │ │ ├── paa_r101_fpn_mstrain_3x_coco.py │ │ │ │ ├── paa_r50_fpn_1.5x_coco.py │ │ │ │ ├── paa_r50_fpn_1x_coco.py │ │ │ │ ├── paa_r50_fpn_2x_coco.py │ │ │ │ └── paa_r50_fpn_mstrain_3x_coco.py │ │ │ ├── pafpn/ │ │ │ │ ├── faster_rcnn_r50_pafpn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── panoptic_fpn/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── panoptic_fpn_r101_fpn_1x_coco.py │ │ │ │ ├── panoptic_fpn_r101_fpn_mstrain_3x_coco.py │ │ │ │ ├── panoptic_fpn_r50_fpn_1x_coco.py │ │ │ │ └── panoptic_fpn_r50_fpn_mstrain_3x_coco.py │ │ │ ├── pascal_voc/ │ │ │ │ ├── faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712.py │ │ │ │ ├── faster_rcnn_r50_fpn_1x_voc0712.py │ │ │ │ ├── faster_rcnn_r50_fpn_1x_voc0712_cocofmt.py │ │ │ │ ├── retinanet_r50_fpn_1x_voc0712.py │ │ │ │ ├── ssd300_voc0712.py │ │ │ │ └── ssd512_voc0712.py │ │ │ ├── pisa/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── pisa_faster_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── pisa_mask_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── pisa_retinanet_r50_fpn_1x_coco.py │ │ │ │ ├── pisa_retinanet_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── pisa_ssd300_coco.py │ │ │ │ └── pisa_ssd512_coco.py │ │ │ ├── point_rend/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── point_rend_r50_caffe_fpn_mstrain_1x_coco.py │ │ │ │ └── point_rend_r50_caffe_fpn_mstrain_3x_coco.py │ │ │ ├── pvt/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── retinanet_pvt-l_fpn_1x_coco.py │ │ │ │ ├── retinanet_pvt-m_fpn_1x_coco.py │ │ │ │ ├── retinanet_pvt-s_fpn_1x_coco.py │ │ │ │ ├── retinanet_pvt-t_fpn_1x_coco.py │ │ │ │ ├── retinanet_pvtv2-b0_fpn_1x_coco.py │ │ │ │ ├── retinanet_pvtv2-b1_fpn_1x_coco.py │ │ │ │ ├── retinanet_pvtv2-b2_fpn_1x_coco.py │ │ │ │ ├── retinanet_pvtv2-b3_fpn_1x_coco.py │ │ │ │ ├── retinanet_pvtv2-b4_fpn_1x_coco.py │ │ │ │ └── retinanet_pvtv2-b5_fpn_1x_coco.py │ │ │ ├── queryinst/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py │ │ │ │ ├── queryinst_r101_fpn_mstrain_480-800_3x_coco.py │ │ │ │ ├── queryinst_r50_fpn_1x_coco.py │ │ │ │ ├── queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py │ │ │ │ └── queryinst_r50_fpn_mstrain_480-800_3x_coco.py │ │ │ ├── regnet/ │ │ │ │ ├── cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py │ │ │ │ ├── 
faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py │ │ │ │ ├── faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-12GF_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-4GF_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-8GF_fpn_1x_coco.py │ │ │ │ ├── metafile.yml │ │ │ │ ├── retinanet_regnetx-1.6GF_fpn_1x_coco.py │ │ │ │ ├── retinanet_regnetx-3.2GF_fpn_1x_coco.py │ │ │ │ └── retinanet_regnetx-800MF_fpn_1x_coco.py │ │ │ ├── reppoints/ │ │ │ │ ├── bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py │ │ │ │ ├── bbox_r50_grid_fpn_gn-neck+head_1x_coco.py │ │ │ │ ├── metafile.yml │ │ │ │ ├── reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py │ │ │ │ ├── reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py │ │ │ │ ├── reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py │ │ │ │ ├── reppoints_moment_r50_fpn_1x_coco.py │ │ │ │ ├── reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py │ │ │ │ ├── reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py │ │ │ │ ├── reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py │ │ │ │ └── reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py │ │ │ ├── res2net/ │ │ │ │ ├── cascade_mask_rcnn_r2_101_fpn_20e_coco.py │ │ │ │ ├── cascade_rcnn_r2_101_fpn_20e_coco.py │ │ │ │ ├── faster_rcnn_r2_101_fpn_2x_coco.py │ │ │ │ ├── htc_r2_101_fpn_20e_coco.py │ │ │ │ ├── mask_rcnn_r2_101_fpn_2x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── resnest/ │ │ │ │ ├── cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py │ │ │ │ ├── cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py │ │ │ │ ├── cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py │ │ │ │ ├── faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py │ │ │ │ ├── faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py │ │ │ │ ├── mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py │ │ │ │ ├── mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── resnet_strikes_back/ │ │ │ │ ├── cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_rsb-pretrain_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py │ │ │ │ ├── metafile.yml │ │ │ │ └── retinanet_r50_fpn_rsb-pretrain_1x_coco.py │ │ │ ├── retinanet/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── retinanet_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── retinanet_r101_caffe_fpn_mstrain_3x_coco.py │ │ │ │ ├── retinanet_r101_fpn_1x_coco.py │ │ │ │ ├── retinanet_r101_fpn_2x_coco.py │ │ │ │ ├── retinanet_r101_fpn_mstrain_640-800_3x_coco.py │ │ │ │ ├── retinanet_r18_fpn_1x8_1x_coco.py │ │ │ │ ├── retinanet_r18_fpn_1x_coco.py │ │ │ │ ├── retinanet_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── retinanet_r50_caffe_fpn_mstrain_1x_coco.py │ │ │ │ ├── retinanet_r50_caffe_fpn_mstrain_2x_coco.py │ │ │ │ ├── 
retinanet_r50_caffe_fpn_mstrain_3x_coco.py │ │ │ │ ├── retinanet_r50_fpn_1x_coco.py │ │ │ │ ├── retinanet_r50_fpn_2x_coco.py │ │ │ │ ├── retinanet_r50_fpn_90k_coco.py │ │ │ │ ├── retinanet_r50_fpn_fp16_1x_coco.py │ │ │ │ ├── retinanet_r50_fpn_mstrain_640-800_3x_coco.py │ │ │ │ ├── retinanet_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── retinanet_x101_32x4d_fpn_2x_coco.py │ │ │ │ ├── retinanet_x101_64x4d_fpn_1x_coco.py │ │ │ │ ├── retinanet_x101_64x4d_fpn_2x_coco.py │ │ │ │ └── retinanet_x101_64x4d_fpn_mstrain_640-800_3x_coco.py │ │ │ ├── rpn/ │ │ │ │ ├── rpn_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── rpn_r101_fpn_1x_coco.py │ │ │ │ ├── rpn_r101_fpn_2x_coco.py │ │ │ │ ├── rpn_r50_caffe_c4_1x_coco.py │ │ │ │ ├── rpn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── rpn_r50_fpn_1x_coco.py │ │ │ │ ├── rpn_r50_fpn_2x_coco.py │ │ │ │ ├── rpn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── rpn_x101_32x4d_fpn_2x_coco.py │ │ │ │ ├── rpn_x101_64x4d_fpn_1x_coco.py │ │ │ │ └── rpn_x101_64x4d_fpn_2x_coco.py │ │ │ ├── sabl/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── sabl_cascade_rcnn_r101_fpn_1x_coco.py │ │ │ │ ├── sabl_cascade_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── sabl_faster_rcnn_r101_fpn_1x_coco.py │ │ │ │ ├── sabl_faster_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── sabl_retinanet_r101_fpn_1x_coco.py │ │ │ │ ├── sabl_retinanet_r101_fpn_gn_1x_coco.py │ │ │ │ ├── sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py │ │ │ │ ├── sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py │ │ │ │ ├── sabl_retinanet_r50_fpn_1x_coco.py │ │ │ │ └── sabl_retinanet_r50_fpn_gn_1x_coco.py │ │ │ ├── scnet/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── scnet_r101_fpn_20e_coco.py │ │ │ │ ├── scnet_r50_fpn_1x_coco.py │ │ │ │ ├── scnet_r50_fpn_20e_coco.py │ │ │ │ ├── scnet_x101_64x4d_fpn_20e_coco.py │ │ │ │ └── scnet_x101_64x4d_fpn_8x1_20e_coco.py │ │ │ ├── scratch/ │ │ │ │ ├── faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── seesaw_loss/ │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py │ │ │ │ └── metafile.yml │ │ │ ├── selfsup_pretrain/ │ │ │ │ ├── mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py │ │ │ │ └── mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py │ │ │ ├── simple_copy_paste/ │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py 
│ │ │ │ └── metafile.yml │ │ │ ├── solo/ │ │ │ │ ├── decoupled_solo_light_r50_fpn_3x_coco.py │ │ │ │ ├── decoupled_solo_r50_fpn_1x_coco.py │ │ │ │ ├── decoupled_solo_r50_fpn_3x_coco.py │ │ │ │ ├── metafile.yml │ │ │ │ ├── solo_r50_fpn_1x_coco.py │ │ │ │ └── solo_r50_fpn_3x_coco.py │ │ │ ├── solov2/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── solov2_light_r18_fpn_3x_coco.py │ │ │ │ ├── solov2_light_r34_fpn_3x_coco.py │ │ │ │ ├── solov2_light_r50_dcn_fpn_3x_coco.py │ │ │ │ ├── solov2_light_r50_fpn_3x_coco.py │ │ │ │ ├── solov2_r101_dcn_fpn_3x_coco.py │ │ │ │ ├── solov2_r101_fpn_3x_coco.py │ │ │ │ ├── solov2_r50_fpn_1x_coco.py │ │ │ │ ├── solov2_r50_fpn_3x_coco.py │ │ │ │ └── solov2_x101_dcn_fpn_3x_coco.py │ │ │ ├── sparse_rcnn/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py │ │ │ │ ├── sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py │ │ │ │ ├── sparse_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py │ │ │ │ └── sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py │ │ │ ├── ssd/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── ssd300_coco.py │ │ │ │ ├── ssd300_fp16_coco.py │ │ │ │ ├── ssd512_coco.py │ │ │ │ ├── ssd512_fp16_coco.py │ │ │ │ └── ssdlite_mobilenetv2_scratch_600e_coco.py │ │ │ ├── strong_baselines/ │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py │ │ │ │ └── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py │ │ │ ├── swin/ │ │ │ │ ├── mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py │ │ │ │ ├── mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py │ │ │ │ ├── mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py │ │ │ │ ├── metafile.yml │ │ │ │ └── retinanet_swin-t-p4-w7_fpn_1x_coco.py │ │ │ ├── timm_example/ │ │ │ │ ├── retinanet_timm_efficientnet_b1_fpn_1x_coco.py │ │ │ │ └── retinanet_timm_tv_resnet50_fpn_1x_coco.py │ │ │ ├── tood/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py │ │ │ │ ├── tood_r101_fpn_mstrain_2x_coco.py │ │ │ │ ├── tood_r50_fpn_1x_coco.py │ │ │ │ ├── tood_r50_fpn_anchor_based_1x_coco.py │ │ │ │ ├── tood_r50_fpn_mstrain_2x_coco.py │ │ │ │ ├── tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py │ │ │ │ └── tood_x101_64x4d_fpn_mstrain_2x_coco.py │ │ │ ├── tridentnet/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── tridentnet_r50_caffe_1x_coco.py │ │ │ │ ├── tridentnet_r50_caffe_mstrain_1x_coco.py │ │ │ │ └── tridentnet_r50_caffe_mstrain_3x_coco.py │ │ │ ├── vfnet/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── vfnet_r101_fpn_1x_coco.py │ │ │ │ ├── vfnet_r101_fpn_2x_coco.py │ │ │ │ ├── vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py │ │ │ │ ├── vfnet_r101_fpn_mstrain_2x_coco.py │ │ │ │ ├── vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco.py │ │ │ │ ├── vfnet_r2_101_fpn_mstrain_2x_coco.py │ │ │ │ ├── vfnet_r50_fpn_1x_coco.py │ │ │ │ ├── vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py │ │ │ │ ├── vfnet_r50_fpn_mstrain_2x_coco.py │ │ │ │ ├── vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py │ │ │ │ ├── vfnet_x101_32x4d_fpn_mstrain_2x_coco.py │ │ │ │ ├── vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py │ │ │ │ └── vfnet_x101_64x4d_fpn_mstrain_2x_coco.py │ │ │ ├── wider_face/ │ │ │ │ └── ssd300_wider_face.py │ │ 
│ ├── yolact/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── yolact_r101_1x8_coco.py │ │ │ │ ├── yolact_r50_1x8_coco.py │ │ │ │ └── yolact_r50_8x8_coco.py │ │ │ ├── yolo/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── yolov3_d53_320_273e_coco.py │ │ │ │ ├── yolov3_d53_fp16_mstrain-608_273e_coco.py │ │ │ │ ├── yolov3_d53_mstrain-416_273e_coco.py │ │ │ │ ├── yolov3_d53_mstrain-608_273e_coco.py │ │ │ │ ├── yolov3_mobilenetv2_320_300e_coco.py │ │ │ │ └── yolov3_mobilenetv2_mstrain-416_300e_coco.py │ │ │ ├── yolof/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── yolof_r50_c5_8x8_1x_coco.py │ │ │ │ └── yolof_r50_c5_8x8_iter-1x_coco.py │ │ │ └── yolox/ │ │ │ ├── metafile.yml │ │ │ ├── yolox_l_8x8_300e_coco.py │ │ │ ├── yolox_m_8x8_300e_coco.py │ │ │ ├── yolox_nano_8x8_300e_coco.py │ │ │ ├── yolox_s_8x8_300e_coco.py │ │ │ ├── yolox_tiny_8x8_300e_coco.py │ │ │ └── yolox_x_8x8_300e_coco.py │ │ ├── docs/ │ │ │ ├── en/ │ │ │ │ ├── Makefile │ │ │ │ ├── _static/ │ │ │ │ │ └── css/ │ │ │ │ │ └── readthedocs.css │ │ │ │ ├── api.rst │ │ │ │ ├── conf.py │ │ │ │ ├── index.rst │ │ │ │ ├── make.bat │ │ │ │ ├── stat.py │ │ │ │ └── tutorials/ │ │ │ │ └── index.rst │ │ │ └── zh_cn/ │ │ │ ├── Makefile │ │ │ ├── _static/ │ │ │ │ └── css/ │ │ │ │ └── readthedocs.css │ │ │ ├── api.rst │ │ │ ├── conf.py │ │ │ ├── index.rst │ │ │ ├── make.bat │ │ │ ├── stat.py │ │ │ └── tutorials/ │ │ │ └── index.rst │ │ ├── mmdet/ │ │ │ ├── __init__.py │ │ │ ├── apis/ │ │ │ │ ├── __init__.py │ │ │ │ ├── inference.py │ │ │ │ ├── test.py │ │ │ │ └── train.py │ │ │ ├── core/ │ │ │ │ ├── __init__.py │ │ │ │ ├── anchor/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── anchor_generator.py │ │ │ │ │ ├── builder.py │ │ │ │ │ ├── point_generator.py │ │ │ │ │ └── utils.py │ │ │ │ ├── bbox/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── assigners/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── approx_max_iou_assigner.py │ │ │ │ │ │ ├── assign_result.py │ │ │ │ │ │ ├── atss_assigner.py │ │ │ │ │ │ ├── base_assigner.py │ │ │ │ │ │ ├── center_region_assigner.py │ │ │ │ │ │ ├── grid_assigner.py │ │ │ │ │ │ ├── hungarian_assigner.py │ │ │ │ │ │ ├── mask_hungarian_assigner.py │ │ │ │ │ │ ├── max_iou_assigner.py │ │ │ │ │ │ ├── point_assigner.py │ │ │ │ │ │ ├── region_assigner.py │ │ │ │ │ │ ├── sim_ota_assigner.py │ │ │ │ │ │ ├── task_aligned_assigner.py │ │ │ │ │ │ └── uniform_assigner.py │ │ │ │ │ ├── builder.py │ │ │ │ │ ├── coder/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── base_bbox_coder.py │ │ │ │ │ │ ├── bucketing_bbox_coder.py │ │ │ │ │ │ ├── delta_xywh_bbox_coder.py │ │ │ │ │ │ ├── distance_point_bbox_coder.py │ │ │ │ │ │ ├── legacy_delta_xywh_bbox_coder.py │ │ │ │ │ │ ├── pseudo_bbox_coder.py │ │ │ │ │ │ ├── tblr_bbox_coder.py │ │ │ │ │ │ └── yolo_bbox_coder.py │ │ │ │ │ ├── demodata.py │ │ │ │ │ ├── iou_calculators/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── builder.py │ │ │ │ │ │ └── iou2d_calculator.py │ │ │ │ │ ├── match_costs/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── builder.py │ │ │ │ │ │ └── match_cost.py │ │ │ │ │ ├── samplers/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── base_sampler.py │ │ │ │ │ │ ├── combined_sampler.py │ │ │ │ │ │ ├── instance_balanced_pos_sampler.py │ │ │ │ │ │ ├── iou_balanced_neg_sampler.py │ │ │ │ │ │ ├── mask_pseudo_sampler.py │ │ │ │ │ │ ├── mask_sampling_result.py │ │ │ │ │ │ ├── ohem_sampler.py │ │ │ │ │ │ ├── pseudo_sampler.py │ │ │ │ │ │ ├── random_sampler.py │ │ │ │ │ │ ├── sampling_result.py │ │ │ │ │ │ └── score_hlr_sampler.py │ │ │ │ │ └── transforms.py │ │ │ │ ├── data_structures/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── general_data.py │ │ │ │ 
│ └── instance_data.py │ │ │ │ ├── evaluation/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── bbox_overlaps.py │ │ │ │ │ ├── class_names.py │ │ │ │ │ ├── eval_hooks.py │ │ │ │ │ ├── mean_ap.py │ │ │ │ │ ├── panoptic_utils.py │ │ │ │ │ └── recall.py │ │ │ │ ├── export/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── model_wrappers.py │ │ │ │ │ ├── onnx_helper.py │ │ │ │ │ └── pytorch2onnx.py │ │ │ │ ├── hook/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── checkloss_hook.py │ │ │ │ │ ├── ema.py │ │ │ │ │ ├── memory_profiler_hook.py │ │ │ │ │ ├── set_epoch_info_hook.py │ │ │ │ │ ├── sync_norm_hook.py │ │ │ │ │ ├── sync_random_size_hook.py │ │ │ │ │ ├── wandblogger_hook.py │ │ │ │ │ ├── yolox_lrupdater_hook.py │ │ │ │ │ └── yolox_mode_switch_hook.py │ │ │ │ ├── mask/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── mask_target.py │ │ │ │ │ ├── structures.py │ │ │ │ │ └── utils.py │ │ │ │ ├── optimizers/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── builder.py │ │ │ │ │ └── layer_decay_optimizer_constructor.py │ │ │ │ ├── post_processing/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── bbox_nms.py │ │ │ │ │ ├── matrix_nms.py │ │ │ │ │ └── merge_augs.py │ │ │ │ ├── utils/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── dist_utils.py │ │ │ │ │ └── misc.py │ │ │ │ └── visualization/ │ │ │ │ ├── __init__.py │ │ │ │ ├── image.py │ │ │ │ └── palette.py │ │ │ ├── models/ │ │ │ │ ├── __init__.py │ │ │ │ ├── backbones/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── csp_darknet.py │ │ │ │ │ ├── darknet.py │ │ │ │ │ ├── detectors_resnet.py │ │ │ │ │ ├── detectors_resnext.py │ │ │ │ │ ├── efficientnet.py │ │ │ │ │ ├── hourglass.py │ │ │ │ │ ├── hrnet.py │ │ │ │ │ ├── mobilenet_v2.py │ │ │ │ │ ├── pvt.py │ │ │ │ │ ├── regnet.py │ │ │ │ │ ├── res2net.py │ │ │ │ │ ├── resnest.py │ │ │ │ │ ├── resnet.py │ │ │ │ │ ├── resnext.py │ │ │ │ │ ├── ssd_vgg.py │ │ │ │ │ ├── swin.py │ │ │ │ │ └── trident_resnet.py │ │ │ │ ├── builder.py │ │ │ │ ├── dense_heads/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── anchor_free_head.py │ │ │ │ │ ├── anchor_head.py │ │ │ │ │ ├── atss_head.py │ │ │ │ │ ├── autoassign_head.py │ │ │ │ │ ├── base_dense_head.py │ │ │ │ │ ├── base_mask_head.py │ │ │ │ │ ├── cascade_rpn_head.py │ │ │ │ │ ├── centernet_head.py │ │ │ │ │ ├── centripetal_head.py │ │ │ │ │ ├── corner_head.py │ │ │ │ │ ├── ddod_head.py │ │ │ │ │ ├── deformable_detr_head.py │ │ │ │ │ ├── dense_test_mixins.py │ │ │ │ │ ├── detr_head.py │ │ │ │ │ ├── embedding_rpn_head.py │ │ │ │ │ ├── fcos_head.py │ │ │ │ │ ├── fovea_head.py │ │ │ │ │ ├── free_anchor_retina_head.py │ │ │ │ │ ├── fsaf_head.py │ │ │ │ │ ├── ga_retina_head.py │ │ │ │ │ ├── ga_rpn_head.py │ │ │ │ │ ├── gfl_head.py │ │ │ │ │ ├── guided_anchor_head.py │ │ │ │ │ ├── lad_head.py │ │ │ │ │ ├── ld_head.py │ │ │ │ │ ├── mask2former_head.py │ │ │ │ │ ├── maskformer_head.py │ │ │ │ │ ├── nasfcos_head.py │ │ │ │ │ ├── paa_head.py │ │ │ │ │ ├── pisa_retinanet_head.py │ │ │ │ │ ├── pisa_ssd_head.py │ │ │ │ │ ├── reppoints_head.py │ │ │ │ │ ├── retina_head.py │ │ │ │ │ ├── retina_sepbn_head.py │ │ │ │ │ ├── rpn_head.py │ │ │ │ │ ├── sabl_retina_head.py │ │ │ │ │ ├── solo_head.py │ │ │ │ │ ├── solov2_head.py │ │ │ │ │ ├── ssd_head.py │ │ │ │ │ ├── tood_head.py │ │ │ │ │ ├── vfnet_head.py │ │ │ │ │ ├── yolact_head.py │ │ │ │ │ ├── yolo_head.py │ │ │ │ │ ├── yolof_head.py │ │ │ │ │ └── yolox_head.py │ │ │ │ ├── detectors/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── atss.py │ │ │ │ │ ├── autoassign.py │ │ │ │ │ ├── base.py │ │ │ │ │ ├── cascade_rcnn.py │ │ │ │ │ ├── centernet.py │ │ │ │ │ ├── cornernet.py │ │ │ │ │ ├── ddod.py │ │ │ │ │ ├── 
deformable_detr.py │ │ │ │ │ ├── detr.py │ │ │ │ │ ├── fast_rcnn.py │ │ │ │ │ ├── faster_rcnn.py │ │ │ │ │ ├── fcos.py │ │ │ │ │ ├── fovea.py │ │ │ │ │ ├── fsaf.py │ │ │ │ │ ├── gfl.py │ │ │ │ │ ├── grid_rcnn.py │ │ │ │ │ ├── htc.py │ │ │ │ │ ├── kd_one_stage.py │ │ │ │ │ ├── lad.py │ │ │ │ │ ├── mask2former.py │ │ │ │ │ ├── mask_rcnn.py │ │ │ │ │ ├── mask_scoring_rcnn.py │ │ │ │ │ ├── maskformer.py │ │ │ │ │ ├── nasfcos.py │ │ │ │ │ ├── paa.py │ │ │ │ │ ├── panoptic_fpn.py │ │ │ │ │ ├── panoptic_two_stage_segmentor.py │ │ │ │ │ ├── point_rend.py │ │ │ │ │ ├── queryinst.py │ │ │ │ │ ├── reppoints_detector.py │ │ │ │ │ ├── retinanet.py │ │ │ │ │ ├── rpn.py │ │ │ │ │ ├── scnet.py │ │ │ │ │ ├── single_stage.py │ │ │ │ │ ├── single_stage_instance_seg.py │ │ │ │ │ ├── solo.py │ │ │ │ │ ├── solov2.py │ │ │ │ │ ├── sparse_rcnn.py │ │ │ │ │ ├── tood.py │ │ │ │ │ ├── trident_faster_rcnn.py │ │ │ │ │ ├── two_stage.py │ │ │ │ │ ├── vfnet.py │ │ │ │ │ ├── yolact.py │ │ │ │ │ ├── yolo.py │ │ │ │ │ ├── yolof.py │ │ │ │ │ └── yolox.py │ │ │ │ ├── losses/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── accuracy.py │ │ │ │ │ ├── ae_loss.py │ │ │ │ │ ├── balanced_l1_loss.py │ │ │ │ │ ├── cross_entropy_loss.py │ │ │ │ │ ├── dice_loss.py │ │ │ │ │ ├── focal_loss.py │ │ │ │ │ ├── gaussian_focal_loss.py │ │ │ │ │ ├── gfocal_loss.py │ │ │ │ │ ├── ghm_loss.py │ │ │ │ │ ├── iou_loss.py │ │ │ │ │ ├── kd_loss.py │ │ │ │ │ ├── mse_loss.py │ │ │ │ │ ├── pisa_loss.py │ │ │ │ │ ├── seesaw_loss.py │ │ │ │ │ ├── smooth_l1_loss.py │ │ │ │ │ ├── utils.py │ │ │ │ │ └── varifocal_loss.py │ │ │ │ ├── necks/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── bfp.py │ │ │ │ │ ├── channel_mapper.py │ │ │ │ │ ├── ct_resnet_neck.py │ │ │ │ │ ├── dilated_encoder.py │ │ │ │ │ ├── dyhead.py │ │ │ │ │ ├── fpg.py │ │ │ │ │ ├── fpn.py │ │ │ │ │ ├── fpn_carafe.py │ │ │ │ │ ├── hrfpn.py │ │ │ │ │ ├── nas_fpn.py │ │ │ │ │ ├── nasfcos_fpn.py │ │ │ │ │ ├── pafpn.py │ │ │ │ │ ├── rfp.py │ │ │ │ │ ├── ssd_neck.py │ │ │ │ │ ├── yolo_neck.py │ │ │ │ │ └── yolox_pafpn.py │ │ │ │ ├── plugins/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── dropblock.py │ │ │ │ │ ├── msdeformattn_pixel_decoder.py │ │ │ │ │ └── pixel_decoder.py │ │ │ │ ├── roi_heads/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base_roi_head.py │ │ │ │ │ ├── bbox_heads/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── bbox_head.py │ │ │ │ │ │ ├── convfc_bbox_head.py │ │ │ │ │ │ ├── dii_head.py │ │ │ │ │ │ ├── double_bbox_head.py │ │ │ │ │ │ ├── sabl_head.py │ │ │ │ │ │ └── scnet_bbox_head.py │ │ │ │ │ ├── cascade_roi_head.py │ │ │ │ │ ├── double_roi_head.py │ │ │ │ │ ├── dynamic_roi_head.py │ │ │ │ │ ├── grid_roi_head.py │ │ │ │ │ ├── htc_roi_head.py │ │ │ │ │ ├── mask_heads/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── coarse_mask_head.py │ │ │ │ │ │ ├── dynamic_mask_head.py │ │ │ │ │ │ ├── fcn_mask_head.py │ │ │ │ │ │ ├── feature_relay_head.py │ │ │ │ │ │ ├── fused_semantic_head.py │ │ │ │ │ │ ├── global_context_head.py │ │ │ │ │ │ ├── grid_head.py │ │ │ │ │ │ ├── htc_mask_head.py │ │ │ │ │ │ ├── mask_point_head.py │ │ │ │ │ │ ├── maskiou_head.py │ │ │ │ │ │ ├── scnet_mask_head.py │ │ │ │ │ │ └── scnet_semantic_head.py │ │ │ │ │ ├── mask_scoring_roi_head.py │ │ │ │ │ ├── pisa_roi_head.py │ │ │ │ │ ├── point_rend_roi_head.py │ │ │ │ │ ├── roi_extractors/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── base_roi_extractor.py │ │ │ │ │ │ ├── generic_roi_extractor.py │ │ │ │ │ │ └── single_level_roi_extractor.py │ │ │ │ │ ├── scnet_roi_head.py │ │ │ │ │ ├── shared_heads/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── res_layer.py 
│ │ │ │ │ ├── sparse_roi_head.py │ │ │ │ │ ├── standard_roi_head.py │ │ │ │ │ ├── test_mixins.py │ │ │ │ │ └── trident_roi_head.py │ │ │ │ ├── seg_heads/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base_semantic_head.py │ │ │ │ │ ├── panoptic_fpn_head.py │ │ │ │ │ └── panoptic_fusion_heads/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base_panoptic_fusion_head.py │ │ │ │ │ ├── heuristic_fusion_head.py │ │ │ │ │ └── maskformer_fusion_head.py │ │ │ │ └── utils/ │ │ │ │ ├── __init__.py │ │ │ │ ├── brick_wrappers.py │ │ │ │ ├── builder.py │ │ │ │ ├── ckpt_convert.py │ │ │ │ ├── conv_upsample.py │ │ │ │ ├── csp_layer.py │ │ │ │ ├── gaussian_target.py │ │ │ │ ├── inverted_residual.py │ │ │ │ ├── make_divisible.py │ │ │ │ ├── misc.py │ │ │ │ ├── normed_predictor.py │ │ │ │ ├── panoptic_gt_processing.py │ │ │ │ ├── point_sample.py │ │ │ │ ├── positional_encoding.py │ │ │ │ ├── res_layer.py │ │ │ │ ├── se_layer.py │ │ │ │ └── transformer.py │ │ │ ├── utils/ │ │ │ │ ├── __init__.py │ │ │ │ ├── collect_env.py │ │ │ │ ├── compat_config.py │ │ │ │ ├── contextmanagers.py │ │ │ │ ├── logger.py │ │ │ │ ├── memory.py │ │ │ │ ├── misc.py │ │ │ │ ├── profiling.py │ │ │ │ ├── replace_cfg_vals.py │ │ │ │ ├── setup_env.py │ │ │ │ ├── split_batch.py │ │ │ │ ├── util_distribution.py │ │ │ │ ├── util_mixins.py │ │ │ │ └── util_random.py │ │ │ └── version.py │ │ ├── model-index.yml │ │ ├── pytest.ini │ │ ├── requirements/ │ │ │ ├── albu.txt │ │ │ ├── build.txt │ │ │ ├── docs.txt │ │ │ ├── mminstall.txt │ │ │ ├── optional.txt │ │ │ ├── readthedocs.txt │ │ │ ├── runtime.txt │ │ │ └── tests.txt │ │ ├── requirements.txt │ │ ├── setup.cfg │ │ ├── setup.py │ │ ├── tests/ │ │ │ ├── test_data/ │ │ │ │ ├── test_datasets/ │ │ │ │ │ ├── test_coco_dataset.py │ │ │ │ │ ├── test_common.py │ │ │ │ │ ├── test_custom_dataset.py │ │ │ │ │ ├── test_dataset_wrapper.py │ │ │ │ │ ├── test_openimages_dataset.py │ │ │ │ │ ├── test_panoptic_dataset.py │ │ │ │ │ └── test_xml_dataset.py │ │ │ │ ├── test_pipelines/ │ │ │ │ │ ├── test_formatting.py │ │ │ │ │ ├── test_loading.py │ │ │ │ │ ├── test_sampler.py │ │ │ │ │ └── test_transform/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── test_img_augment.py │ │ │ │ │ ├── test_models_aug_test.py │ │ │ │ │ ├── test_rotate.py │ │ │ │ │ ├── test_shear.py │ │ │ │ │ ├── test_transform.py │ │ │ │ │ ├── test_translate.py │ │ │ │ │ └── utils.py │ │ │ │ └── test_utils.py │ │ │ ├── test_downstream/ │ │ │ │ └── test_mmtrack.py │ │ │ ├── test_metrics/ │ │ │ │ ├── test_box_overlap.py │ │ │ │ ├── test_losses.py │ │ │ │ ├── test_mean_ap.py │ │ │ │ └── test_recall.py │ │ │ ├── test_models/ │ │ │ │ ├── test_backbones/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── test_csp_darknet.py │ │ │ │ │ ├── test_detectors_resnet.py │ │ │ │ │ ├── test_efficientnet.py │ │ │ │ │ ├── test_hourglass.py │ │ │ │ │ ├── test_hrnet.py │ │ │ │ │ ├── test_mobilenet_v2.py │ │ │ │ │ ├── test_pvt.py │ │ │ │ │ ├── test_regnet.py │ │ │ │ │ ├── test_renext.py │ │ │ │ │ ├── test_res2net.py │ │ │ │ │ ├── test_resnest.py │ │ │ │ │ ├── test_resnet.py │ │ │ │ │ ├── test_swin.py │ │ │ │ │ ├── test_trident_resnet.py │ │ │ │ │ └── utils.py │ │ │ │ ├── test_dense_heads/ │ │ │ │ │ ├── test_anchor_head.py │ │ │ │ │ ├── test_atss_head.py │ │ │ │ │ ├── test_autoassign_head.py │ │ │ │ │ ├── test_centernet_head.py │ │ │ │ │ ├── test_corner_head.py │ │ │ │ │ ├── test_ddod_head.py │ │ │ │ │ ├── test_dense_heads_attr.py │ │ │ │ │ ├── test_detr_head.py │ │ │ │ │ ├── test_fcos_head.py │ │ │ │ │ ├── test_fsaf_head.py │ │ │ │ │ ├── test_ga_anchor_head.py │ │ │ │ │ ├── test_gfl_head.py │ 
│ │ │ │ ├── test_lad_head.py │ │ │ │ │ ├── test_ld_head.py │ │ │ │ │ ├── test_mask2former_head.py │ │ │ │ │ ├── test_maskformer_head.py │ │ │ │ │ ├── test_paa_head.py │ │ │ │ │ ├── test_pisa_head.py │ │ │ │ │ ├── test_sabl_retina_head.py │ │ │ │ │ ├── test_solo_head.py │ │ │ │ │ ├── test_tood_head.py │ │ │ │ │ ├── test_vfnet_head.py │ │ │ │ │ ├── test_yolact_head.py │ │ │ │ │ ├── test_yolof_head.py │ │ │ │ │ └── test_yolox_head.py │ │ │ │ ├── test_forward.py │ │ │ │ ├── test_loss.py │ │ │ │ ├── test_loss_compatibility.py │ │ │ │ ├── test_necks.py │ │ │ │ ├── test_plugins.py │ │ │ │ ├── test_roi_heads/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── test_bbox_head.py │ │ │ │ │ ├── test_mask_head.py │ │ │ │ │ ├── test_roi_extractor.py │ │ │ │ │ ├── test_sabl_bbox_head.py │ │ │ │ │ └── utils.py │ │ │ │ ├── test_seg_heads/ │ │ │ │ │ └── test_maskformer_fusion_head.py │ │ │ │ └── test_utils/ │ │ │ │ ├── test_brick_wrappers.py │ │ │ │ ├── test_conv_upsample.py │ │ │ │ ├── test_inverted_residual.py │ │ │ │ ├── test_model_misc.py │ │ │ │ ├── test_position_encoding.py │ │ │ │ ├── test_se_layer.py │ │ │ │ └── test_transformer.py │ │ │ ├── test_onnx/ │ │ │ │ ├── __init__.py │ │ │ │ ├── test_head.py │ │ │ │ ├── test_neck.py │ │ │ │ └── utils.py │ │ │ ├── test_runtime/ │ │ │ │ ├── async_benchmark.py │ │ │ │ ├── test_apis.py │ │ │ │ ├── test_async.py │ │ │ │ ├── test_config.py │ │ │ │ ├── test_eval_hook.py │ │ │ │ └── test_fp16.py │ │ │ └── test_utils/ │ │ │ ├── test_anchor.py │ │ │ ├── test_assigner.py │ │ │ ├── test_coder.py │ │ │ ├── test_compat_config.py │ │ │ ├── test_general_data.py │ │ │ ├── test_hook.py │ │ │ ├── test_layer_decay_optimizer_constructor.py │ │ │ ├── test_logger.py │ │ │ ├── test_masks.py │ │ │ ├── test_memory.py │ │ │ ├── test_misc.py │ │ │ ├── test_nms.py │ │ │ ├── test_replace_cfg_vals.py │ │ │ ├── test_setup_env.py │ │ │ ├── test_split_batch.py │ │ │ ├── test_version.py │ │ │ └── test_visualization.py │ │ └── tools/ │ │ ├── analysis_tools/ │ │ │ ├── analyze_logs.py │ │ │ ├── analyze_results.py │ │ │ ├── benchmark.py │ │ │ ├── coco_error_analysis.py │ │ │ ├── confusion_matrix.py │ │ │ ├── eval_metric.py │ │ │ ├── get_flops.py │ │ │ ├── optimize_anchors.py │ │ │ ├── robustness_eval.py │ │ │ └── test_robustness.py │ │ ├── dataset_converters/ │ │ │ ├── cityscapes.py │ │ │ ├── images2coco.py │ │ │ └── pascal_voc.py │ │ ├── deployment/ │ │ │ ├── mmdet2torchserve.py │ │ │ ├── mmdet_handler.py │ │ │ ├── onnx2tensorrt.py │ │ │ ├── pytorch2onnx.py │ │ │ ├── test.py │ │ │ └── test_torchserver.py │ │ ├── dist_test.sh │ │ ├── dist_train.sh │ │ ├── misc/ │ │ │ ├── browse_dataset.py │ │ │ ├── download_dataset.py │ │ │ ├── gen_coco_panoptic_test_info.py │ │ │ ├── get_image_metas.py │ │ │ ├── print_config.py │ │ │ └── split_coco.py │ │ ├── model_converters/ │ │ │ ├── detectron2pytorch.py │ │ │ ├── publish_model.py │ │ │ ├── regnet2mmdet.py │ │ │ ├── selfsup2mmdet.py │ │ │ ├── upgrade_model_version.py │ │ │ └── upgrade_ssd_version.py │ │ ├── slurm_test.sh │ │ ├── slurm_train.sh │ │ ├── test.py │ │ └── train.py │ ├── models_menu/ │ │ ├── mmscraper.py │ │ ├── models_json.json │ │ ├── samScraper.py │ │ └── sam_models.json │ ├── setup.py │ ├── tempCodeRunnerFile.py │ └── trackers/ │ ├── __init__.py │ ├── botsort/ │ │ ├── basetrack.py │ │ ├── bot_sort.py │ │ ├── configs/ │ │ │ └── botsort.yaml │ │ ├── gmc.py │ │ ├── kalman_filter.py │ │ ├── matching.py │ │ └── reid_multibackend.py │ ├── bytetrack/ │ │ ├── basetrack.py │ │ ├── byte_tracker.py │ │ ├── configs/ │ │ │ └── bytetrack.yaml │ │ ├── kalman_filter.py 
│ │ └── matching.py │ ├── deepocsort/ │ │ ├── __init__.py │ │ ├── args.py │ │ ├── association.py │ │ ├── cmc.py │ │ ├── configs/ │ │ │ └── deepocsort.yaml │ │ ├── embedding.py │ │ ├── kalmanfilter.py │ │ ├── ocsort.py │ │ └── reid_multibackend.py │ ├── multi_tracker_zoo.py │ ├── ocsort/ │ │ ├── association.py │ │ ├── configs/ │ │ │ └── ocsort.yaml │ │ ├── kalmanfilter.py │ │ └── ocsort.py │ ├── reid_export.py │ └── strongsort/ │ ├── .gitignore │ ├── __init__.py │ ├── configs/ │ │ └── strongsort.yaml │ ├── deep/ │ │ ├── checkpoint/ │ │ │ └── .gitkeep │ │ ├── models/ │ │ │ ├── __init__.py │ │ │ ├── densenet.py │ │ │ ├── hacnn.py │ │ │ ├── inceptionresnetv2.py │ │ │ ├── inceptionv4.py │ │ │ ├── mlfn.py │ │ │ ├── mobilenetv2.py │ │ │ ├── mudeep.py │ │ │ ├── nasnet.py │ │ │ ├── osnet.py │ │ │ ├── osnet_ain.py │ │ │ ├── pcb.py │ │ │ ├── resnet.py │ │ │ ├── resnet_ibn_a.py │ │ │ ├── resnet_ibn_b.py │ │ │ ├── resnetmid.py │ │ │ ├── senet.py │ │ │ ├── shufflenet.py │ │ │ ├── shufflenetv2.py │ │ │ ├── squeezenet.py │ │ │ └── xception.py │ │ └── reid_model_factory.py │ ├── reid_multibackend.py │ ├── sort/ │ │ ├── __init__.py │ │ ├── detection.py │ │ ├── iou_matching.py │ │ ├── kalman_filter.py │ │ ├── linear_assignment.py │ │ ├── nn_matching.py │ │ ├── preprocessing.py │ │ ├── track.py │ │ └── tracker.py │ ├── strong_sort.py │ └── utils/ │ ├── __init__.py │ ├── asserts.py │ ├── draw.py │ ├── evaluation.py │ ├── io.py │ ├── json_logger.py │ ├── log.py │ ├── parser.py │ └── tools.py ├── LICENSE ├── MANIFEST.in ├── README.md ├── additional_scripts/ │ └── coco_eval.py ├── docs/ │ ├── Installation/ │ │ ├── executable.md │ │ ├── full installation.md │ │ ├── index.yml │ │ └── problems.md │ ├── index.md │ ├── main_features/ │ │ ├── Export.md │ │ ├── SAM.md │ │ ├── index.yml │ │ ├── inputs.md │ │ ├── segmentation.md │ │ └── tracking/ │ │ ├── index.yml │ │ ├── interpolation.md │ │ └── tracking.md │ ├── model_selection/ │ │ ├── index.yml │ │ ├── merge.md │ │ └── model_explorer.md │ ├── retype.yml │ └── user_interface.md ├── releasenotes.md ├── requirements.txt ├── setup.py └── yolo training commands.txt

================================================
FILE CONTENTS
================================================

================================================
FILE: .github/workflows/retype-action.yml
================================================
name: Publish Retype powered website to GitHub Pages

on:
  workflow_dispatch:
  push:
    branches:
      - master

jobs:
  publish:
    name: Publish to retype branch
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - uses: actions/checkout@v2
      - uses: retypeapp/action-build@latest
      - uses: retypeapp/action-github-pages@latest
        with:
          update-branch: true

================================================
FILE: .gitignore
================================================
*.pt
*.pth
# anything in the folders: datasets, runs
runs/
*.lnk
*.ps1
*.docx

================================================
FILE: DLTA_AI_app/.flake8
================================================
[flake8]
exclude = .anaconda3/*
ignore = E203, E741, W503, W504

================================================
FILE: DLTA_AI_app/.gitignore
================================================
/.cache/
/.pytest_cache/
/build/
/dist/
/*.egg-info/
*.py[cdo]
.DS_Store
.idea/

# mp4
*.mp4
# anything in the folder test_videos
test_videos/*

saved_models.json

================================================
FILE: DLTA_AI_app/.gitmodules
================================================
[submodule "github2pypi"]
	path = github2pypi
	url = https://github.com/wkentaro/github2pypi.git
	path = github2pypi
	url = https://github.com/wkentaro/github2pypi.git

================================================
FILE: DLTA_AI_app/__main__.py
================================================
import os
import sys

sys.path.append(os.path.dirname(os.path.realpath(__file__)))
os.chdir(os.path.dirname(os.path.realpath(__file__)))

from PyQt6 import QtGui, QtWidgets, QtCore

from labelme import __appname__
from labelme import __version__
from labelme.utils import newIcon

import qdarktheme


def main():
    app = QtWidgets.QApplication(sys.argv)
    QtWidgets.QApplication.setHighDpiScaleFactorRoundingPolicy(
        QtCore.Qt.HighDpiScaleFactorRoundingPolicy.RoundPreferFloor)
    app.setApplicationName(__appname__)
    app.setWindowIcon(newIcon("icon"))

    # create and show splash screen
    splash_pix = QtGui.QPixmap('labelme/icons/splash_screen.png')
    splash = QtWidgets.QSplashScreen(splash_pix)
    # center the splash screen on the primary screen
    try:
        from screeninfo import get_monitors
        original_width = get_monitors()[0].width
        original_height = get_monitors()[0].height
        splash_width = splash.width()
        splash_height = splash.height()
        splash.move(int((original_width - splash_width) / 2),
                    int((original_height - splash_height) / 2))
    except Exception:
        pass
    splash.show()

    qss = """
    QMenuBar::item {
        padding: 10px;
        margin: 0 5px
    }
    QMenu {
        border-radius: 5px;
    }
    QMenu::item {
        padding: 8px;
        margin: 5px;
        border-radius: 5px;
    }
    QToolTip {
        color: #111111;
        background-color: #EEEEEE;
    }
    QCheckBox {
        margin: 0 7px;
    }
    QComboBox {
        font-size: 10pt;
        font-weight: bold;
    }
    """

    try:
        import yaml
        with open("labelme/config/default_config.yaml", "r") as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
        qdarktheme.setup_theme(theme=config["theme"],
                               default_theme="dark",
                               additional_qss=qss)
    except Exception as e:
        print(f"ERROR {e}")

    # create main window
    from labelme.app import MainWindow
    win = MainWindow()
    splash.finish(win)
    win.showMaximized()
    # close splash screen
    win.raise_()
    sys.exit(app.exec())


# this main block is required to generate executable by pyinstaller
if __name__ == "__main__":
    main()

================================================
FILE: DLTA_AI_app/__main__.spec
================================================
# -*- mode: python -*-
# vim: ft=python

from glob import glob

block_cipher = None

datas_list = [
    ('models_menu/*.json', 'models_menu'),
    ('models_menu/*.py', 'models_menu'),
    ('ultralytics/', 'ultralytics'),
    ('labelme/', 'labelme'),
    ('mmdetection/', 'mmdetection'),
    ('trackers/', 'trackers')
]
hiddenimports_list = ['mmcv', 'mmcv._ext', 'torchvision']

a = Analysis(
    ['__main__.py'],
    pathex=[],
    binaries=[],
    datas=datas_list,
    hiddenimports=hiddenimports_list,
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False,
)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
exe = EXE(
    pyz,
    a.scripts,
    [],
    exclude_binaries=True,
    name='DLTA-AI',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
    icon="C:\\Graduation Project\\Auto Annotation Tool\\DLTA-AI\\DLTA-AI-app\\labelme\\icons\\icon.png",
)
coll = COLLECT(
    exe,
    a.binaries,
    a.zipfiles,
    a.datas,
    strip=False,
    upx=True,
    upx_exclude=[],
    name='DLTA-AI',
)
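
# Build note: a PyInstaller .spec file is itself Python, and the bundle above is
# produced by pointing PyInstaller at it (typically `pyinstaller __main__.spec`,
# run from inside DLTA_AI_app/ so the relative datas_list entries resolve). The
# absolute Windows icon path in EXE(...) is specific to the original development
# machine and will need adjusting elsewhere.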
================================================
FILE: DLTA_AI_app/inferencing.py
================================================
import copy
from supervision.detection.core import Detections
from time import time
import torch
from mmdet.apis import inference_detector, init_detector, async_inference_detector
import cv2
import numpy as np
import matplotlib.pyplot as plt
import warnings
# from ultralytics.yolo.utils.ops import Profile, non_max_suppression, scale_boxes, process_mask, process_mask_native
from labelme.utils.helpers import mathOps

warnings.filterwarnings("ignore")


class models_inference():

    def __init__(self):
        self.annotating_models = {}

    @staticmethod
    def full_points(bbox):
        # four corner points of an xyxy bbox (static: the original took no self)
        return np.array([[bbox[0], bbox[1]],
                         [bbox[0], bbox[3]],
                         [bbox[2], bbox[3]],
                         [bbox[2], bbox[1]]])

    @torch.no_grad()
    def decode_file(self, img, model, classdict, threshold=0.3, img_array_flag=False):
        if model.__class__.__name__ == "YOLO":
            if isinstance(img, str):
                img = cv2.imread(img)
            # get image size
            img_resized = cv2.resize(img, (640, 640))
            # default yolo arguments from yolov8 tracking repo
            # imgsz=(640, 640),  # inference size (height, width)
            # conf_thres=0.25,   # confidence threshold
            # iou_thres=0.45,    # NMS IOU threshold
            # max_det=1000,      # maximum detections per image
            results = model(img_resized, conf=0.25, iou=0.45, verbose=False)
            results = results[0]
            # if there are no masks, return an empty result dict
            if results.masks is None:
                return {"results": {}}
            masks = results.masks.cpu().numpy().masks
            masks = masks > 0.0
            org_size = img.shape[:2]
            out_size = masks.shape[1:]
            # print(f'org_size : {org_size} , out_size : {out_size}')
            # convert boxes to original image size same as the masks (coords = coords * org_size / out_size)
            boxes = results.boxes.xyxy.cpu().numpy()
            boxes = boxes * np.array([org_size[1] / out_size[1],
                                      org_size[0] / out_size[0],
                                      org_size[1] / out_size[1],
                                      org_size[0] / out_size[0]])
            detections = Detections(
                xyxy=boxes,
                confidence=results.boxes.conf.cpu().numpy(),
                class_id=results.boxes.cls.cpu().numpy().astype(int)
            )
            polygons = []
            result_dict = {}
            resize_factors = [org_size[0] / out_size[0], org_size[1] / out_size[1]]
            if len(masks) == 0:
                return {"results": {}}
            for mask in masks:
                polygon = mathOps.mask_to_polygons(mask, resize_factors=resize_factors)
                polygons.append(polygon)
            # detection is a tuple of (box, confidence, class_id, tracker_id)
            res_list = []
            for ind, detection in enumerate(detections):
                if round(detection[1], 2) < float(threshold):
                    continue
                result = {}
                result["class"] = classdict.get(int(detection[2]))
                result["confidence"] = str(round(detection[1], 2))
                result["bbox"] = detection[0].astype(int)
                # polygons[ind] pairs with the ind-th detection; indexing by the loop
                # counter keeps masks aligned even when low-confidence detections are
                # skipped (a separate counter would drift after the first skip)
                result["seg"] = polygons[ind]
                if result["class"] is None:
                    continue
                if len(result["seg"]) < 3:
                    continue
                res_list.append(result)
            result_dict["results"] = res_list
            return result_dict

        if img_array_flag:
            results = inference_detector(model, img)
        else:
            results = inference_detector(model, plt.imread(img))
        # results = async_inference_detector(model, plt.imread(img_path))
        torch.cuda.empty_cache()
        results0 = []
        results1 = []
        for i in classdict.keys():
            mask = results[0][i][:, 4] >= float(threshold)
            results0.append(results[0][i][mask])
            results1.append(list(np.array(results[1][i])[mask]))
        # for i in classdict.keys():
        #     results0.append(results[0][i])
        #     results1.append(results[1][i])
        # self.annotating_models[model.__class__.__name__] = [results0 , results1]
        # print(self.annotating_models.keys())
        # # if the length of the annotating_models is greater than 1 we need to merge the masks
        # if len(self.annotating_models.keys()) > 1:
        #     print("merging masks")
        #     results0, results1 = self.merge_masks()
        # assert len(results0) == len(results1)
        # for i in range(len(results0)):
        #     assert len(results0[i]) == len(results1[i])
        return results0, results1
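
    # The two branches above return differently shaped data: for Ultralytics YOLO
    # models, decode_file returns a finished dict, e.g. (values illustrative)
    #     {"results": [{"class": "person", "confidence": "0.87",
    #                   "bbox": array([x1, y1, x2, y2]), "seg": [[x, y], ...]}, ...]}
    # while for mmdetection models it returns the raw (results0, results1) pair of
    # per-class bboxes-with-scores and masks, which polegonise() below converts
    # into that same dict format.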

    def polegonise(self, results0, results1, classdict, threshold=0.3, show_bbox_flag=False):
        # NOTE: spelling of 'polegonise' kept as in the original API
        result_dict = {}
        res_list = []
        self.classes_numbering = [keyno for keyno in classdict.keys()]
        # print(self.classes_numbering)
        for classno in range(len(results0)):
            for instance in range(len(results0[classno])):
                if float(results0[classno][instance][-1]) < float(threshold):
                    continue
                result = {}
                result["class"] = classdict.get(self.classes_numbering[classno])
                # Confidence
                result["confidence"] = str(round(results0[classno][instance][-1], 2))
                if classno == 0:
                    result["seg"] = mathOps.mask_to_polygons(
                        results1[classno][instance].astype(np.uint8), 10)
                else:
                    result["seg"] = mathOps.mask_to_polygons(
                        results1[classno][instance].astype(np.uint8), 25)
                # result["bbox"] = self.get_bbox(result["seg"])
                if show_bbox_flag:
                    # result["bbox"] = full_points(result["bbox"]).tolist()
                    # points = full_points(result["bbox"])
                    # result["x1"] = points[0][0]
                    # result["y1"] = points[0][1]
                    # result["x2"] = points[1][0]
                    # result["y2"] = points[1][1]
                    # result["x3"] = points[2][0]
                    # result["y3"] = points[2][1]
                    # result["x4"] = points[3][0]
                    # result["y4"] = points[3][1]
                    pass
                if result["class"] is None:
                    continue
                if len(result["seg"]) < 3:
                    continue
                res_list.append(result)
        result_dict["results"] = res_list
        return result_dict

    def merge_masks(self):
        tic = time()
        result0 = []
        result1 = []

        # Counting for debugging purposes
        # count the number of instances in each model
        counts = count_instances(self.annotating_models)
        # print the counts of each model
        for model in counts.keys():
            print("model {} has {} instances".format(model, counts[model]))

        # the following lines can be used if we use models with different number of classes
        # classnos = []
        # for model in self.annotating_models.keys():
        #     classnos.append(len(self.annotating_models[model][1]))
        # print(classnos)
        # instead the following line of code will be used if we use models with the same number of classes
        classnos = len(self.annotating_models[list(self.annotating_models.keys())[0]][1])
        merged_counts = 0

        # initialize the result list with the same number of classes as the model with the most classes
        for i in range(classnos):
            result1.append([])
            result0.append([])

        # deep copy the annotating_models dict to pop all the masks we have merged (try delete it for future optimisation)
        annotating_models_copy = copy.deepcopy(self.annotating_models)

        # merge masks of the same class
        for idx1, model in enumerate(self.annotating_models.keys()):
            for classno in range(len(self.annotating_models[model][1])):
                # check if an instance exists in the model in this class
                if len(self.annotating_models[model][1][classno]) > 0:
                    for instance in range(len(self.annotating_models[model][1][classno])):
                        for idx2, model2 in enumerate(self.annotating_models.keys()):
                            if model != model2 and idx2 > idx1:
                                # print(type(annotating_models_copy[model][0][classno]), type(annotating_models_copy[model2][0][classno]))
                                # check if the class exists in the other model
                                if classno in range(len(self.annotating_models[model2][1])):
                                    # check if an instance exists in the other model
                                    if len(self.annotating_models[model2][1][classno]) > 0:
                                        for instance2 in range(len(self.annotating_models[model2][1][classno])):
                                            dirty = False
                                            # print('checking class ' + str(classno) + ' of models ' + model + str(idx1) + ' and ' + model2 + str(idx2))
                                            # get the intersection percentage of the two masks
                                            intersection = np.logical_and(
                                                self.annotating_models[model][1][classno][instance],
                                                self.annotating_models[model2][1][classno][instance2])
                                            intersection = np.sum(intersection)
                                            union = np.logical_or(
                                                self.annotating_models[model][1][classno][instance],
                                                self.annotating_models[model2][1][classno][instance2])
                                            union = np.sum(union)
                                            iou = intersection / union
                                            # print('iou of class ' + str(classno) + ' instance ' + str(instance) + ' and instance ' + str(instance2) + ' is ' + str(iou))
                                            if iou > 0.5:
                                                if (annotating_models_copy[model][1][classno][instance] is None) or \
                                                        (annotating_models_copy[model2][1][classno][instance2] is None):
                                                    dirty = True
                                                if not dirty:
                                                    # merge their bboxes and store the result in result0
                                                    bbox1 = self.annotating_models[model][0][classno][instance]
                                                    bbox2 = self.annotating_models[model2][0][classno][instance2]
                                                    bbox = [min(bbox1[0], bbox2[0]), min(bbox1[1], bbox2[1]),
                                                            max(bbox1[2], bbox2[2]), max(bbox1[3], bbox2[3]),
                                                            max(bbox1[4], bbox2[4])]
                                                    result0[classno].append(bbox)
                                                    # store the merged mask in result1
                                                    result1[classno].append(np.logical_or(
                                                        self.annotating_models[model][1][classno][instance],
                                                        self.annotating_models[model2][1][classno][instance2]))
                                                    # print('merging masks of class ' + str(classno) + ' instance ' + str(instance) + ' and instance ' + str(instance2) + ' of models ' + model + ' and ' + model2)
                                                    merged_counts += 1
                                                    # remove the mask from both models
                                                    annotating_models_copy[model][1][classno][instance] = None
                                                    annotating_models_copy[model2][1][classno][instance2] = None
                                                    annotating_models_copy[model][0][classno][instance] = None
                                                    annotating_models_copy[model2][0][classno][instance2] = None
                                                    # continue to the next instance of the first model
                                                    break

        counts_here = {}
        # add the remaining masks to the result
        for model in annotating_models_copy.keys():
            counts_here[model] = 0
            for classno in range(len(annotating_models_copy[model][1])):
                for instance in range(len(annotating_models_copy[model][1][classno])):
                    if annotating_models_copy[model][1][classno][instance] is not None:
                        counts_here[model] += 1
                        # print('adding mask of class ' + str(classno) + ' instance ' + str(instance) + ' of model ' + model)
                        result1[classno].append(annotating_models_copy[model][1][classno][instance])
                        result0[classno].append(annotating_models_copy[model][0][classno][instance])

        # clear the annotating_models and add the result to it
        self.annotating_models = {}
        # self.annotating_models["merged"] = [result0 , result1]
        for model in counts_here.keys():
            print("model {} has {} instances".format(model, counts_here[model]))
        print("merged {} instances".format(merged_counts))
        tac = time()
        print("merging took {} ms".format((tac - tic) * 1000))
        return result0, result1
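
    # Worked example of the IoU test above on two toy boolean masks
    # (values illustrative):
    #     a = np.array([[True, True], [False, False]])
    #     b = np.array([[True, False], [True, False]])
    #     np.logical_and(a, b).sum() / np.logical_or(a, b).sum()  # -> 1/3
    # Masks detected by two different models are merged only when iou > 0.5.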

# result will have ---> bbox , confidence , class_id , tracker_id , segment
# result of the detection phase only should be (bbox , confidence , class_id , segment)


def count_instances(annotating_models):
    # separate the counts for each model
    counts = {}
    for model in annotating_models.keys():
        counts[model] = 0
        for classno in range(len(annotating_models[model][1])):
            counts[model] += len(annotating_models[model][1][classno])
    return counts

================================================
FILE: DLTA_AI_app/labelme/__init__.py
================================================
# flake8: noqa

import logging
import sys

from qtpy import QT_VERSION


__appname__ = "DLTA-AI"

# Semantic Versioning 2.0.0: https://semver.org/
# 1. MAJOR version when you make incompatible API changes;
# 2. MINOR version when you add functionality in a backwards-compatible manner;
# 3. PATCH version when you make backwards-compatible bug fixes.
__version__ = "1.1" QT4 = QT_VERSION[0] == "4" QT5 = QT_VERSION[0] == "5" del QT_VERSION PY2 = sys.version[0] == "2" PY3 = sys.version[0] == "3" del sys from labelme.label_file import LabelFile from labelme import testing from labelme import utils ================================================ FILE: DLTA_AI_app/labelme/app.py ================================================ # -*- coding: utf-8 -*- import functools import json import math import re import copy import imgviz import torch import cv2 import warnings import os import os.path as osp import numpy as np from pathlib import Path from PyQt6 import QtCore from PyQt6.QtCore import Qt, QThread from PyQt6 import QtGui from PyQt6 import QtWidgets from PyQt6.QtCore import QObject, pyqtSignal, pyqtSlot from . import __appname__ from . import PY2 from . import QT5 from . import utils from .utils.sam import Sam_Predictor from .utils.helpers import visualizations, mathOps from .utils.custom_exports import custom_exports_list from .config import get_config from .label_file import LabelFile from .label_file import LabelFileError from .logger import logger from .shape import Shape from .widgets import BrightnessContrastDialog, Canvas, LabelDialog, LabelListWidget, LabelListWidgetItem, ToolBar, UniqueLabelQListWidget, ZoomWidget from .widgets import MsgBox, interpolation_UI, exportData_UI, deleteSelectedShape_UI, scaleObject_UI, getIDfromUser_UI, notification from .widgets import runtime_data_UI, preferences_UI, shortcut_selector_UI, links, feedback_UI, check_updates_UI from .widgets.editLabel_videoMode import editLabel_idChanged_UI, editLabel_handle_data from .widgets.segmentation_options_UI import SegmentationOptionsUI from .widgets.merge_feature_UI import MergeFeatureUI from .intelligence import Intelligence from .intelligence import coco_classes, color_palette from supervision.detection.core import Detections from trackers.multi_tracker_zoo import create_tracker from ultralytics.yolo.utils.torch_utils import select_device warnings.filterwarnings("ignore") # the root of the repo FILE = Path(__file__).resolve() ROOT = FILE.parents[0] ROOT = ROOT.parents[0] ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') reid_weights = Path('osnet_x1_0_msmt17.pt') LABEL_COLORMAP = imgviz.label_colormap(value=200) class MainWindow(QtWidgets.QMainWindow): FIT_WINDOW, FIT_WIDTH, MANUAL_ZOOM = 0, 1, 2 tracking_progress_bar_signal = pyqtSignal(int) def __init__( self, config=None, filename=None, output=None, output_file=None, output_dir=None, ): self.buttons_text_style_sheet = "QPushButton {font-size: 10pt; margin: 2px 5px; padding: 2px 7px;font-weight: bold; background-color: #0d69f5; color: #FFFFFF;} QPushButton:hover {background-color: #4990ED;} QPushButton:disabled {background-color: #7A7A7A;}" if output is not None: logger.warning( "argument output is deprecated, use output_file instead" ) if output_file is None: output_file = output # see labelme/config/default_config.yaml for valid configuration if config is None: config = get_config() self._config = config self.decodingCanceled = False # set default shape colors Shape.line_color = QtGui.QColor(*self._config["shape"]["line_color"]) Shape.fill_color = QtGui.QColor(*self._config["shape"]["fill_color"]) Shape.select_line_color = QtGui.QColor( *self._config["shape"]["select_line_color"] ) Shape.select_fill_color = QtGui.QColor( *self._config["shape"]["select_fill_color"] ) Shape.vertex_fill_color = QtGui.QColor( 
*self._config["shape"]["vertex_fill_color"] ) Shape.hvertex_fill_color = QtGui.QColor( *self._config["shape"]["hvertex_fill_color"] ) # update models json mathOps.update_saved_models_json(os.getcwd()) # add the segmentation UI controls interfance self.segmentation_options_UI = SegmentationOptionsUI(self) # add the merge ui interface self.merge_feature_UI = MergeFeatureUI(self) super(MainWindow, self).__init__() try: self.intelligenceHelper = Intelligence(self) except: print("it seems you have a problem with initializing model\ncheck you have at least one model") self.helper_first_time_flag = True else: self.helper_first_time_flag = False self.setWindowTitle(__appname__) # Whether we need to save or not. self.dirty = False self._noSelectionSlot = False # Main widgets and related state. self.labelDialog = LabelDialog( parent=self, labels=self._config["labels"], sort_labels=self._config["sort_labels"], show_text_field=self._config["show_label_text_field"], completion=self._config["label_completion"], fit_to_content=self._config["fit_to_content"], flags=self._config["label_flags"], ) self.labelList = LabelListWidget() self.lastOpenDir = None self.flag_dock = self.flag_widget = None self.flag_dock = QtWidgets.QDockWidget(self.tr("Flags"), self) self.flag_dock.setObjectName("Flags") self.flag_widget = QtWidgets.QListWidget() if config["flags"]: self.loadFlags({k: False for k in config["flags"]}) # self.flag_dock.setWidget(self.flag_widget) self.flag_widget.itemChanged.connect(self.setDirty) self.labelList.itemSelectionChanged.connect(self.labelSelectionChanged) self.labelList.itemDoubleClicked.connect(self.editLabel) self.labelList.itemChanged.connect(self.labelItemChanged) self.labelList.itemDropped.connect(self.labelOrderChanged) self.shape_dock = QtWidgets.QDockWidget( self.tr("Polygon Labels"), self ) self.shape_dock.setObjectName("Labels") self.shape_dock.setWidget(self.labelList) self.uniqLabelList = UniqueLabelQListWidget() self.uniqLabelList.setToolTip( self.tr( "Select label to start annotating for it. " "Press 'Esc' to deselect." 
) ) if self._config["labels"]: for label in self._config["labels"]: item = self.uniqLabelList.createItemFromLabel(label) self.uniqLabelList.addItem(item) rgb = self._get_rgb_by_label(label) self.uniqLabelList.setItemLabel(item, label, rgb) self.label_dock = QtWidgets.QDockWidget(self.tr(u"Label List"), self) self.label_dock.setObjectName(u"Label List") self.label_dock.setWidget(self.uniqLabelList) self.fileSearch = QtWidgets.QLineEdit() self.fileSearch.setPlaceholderText(self.tr("Search Filename")) self.fileSearch.textChanged.connect(self.fileSearchChanged) self.fileListWidget = QtWidgets.QListWidget() self.fileListWidget.itemSelectionChanged.connect( self.fileSelectionChanged ) fileListLayout = QtWidgets.QVBoxLayout() fileListLayout.setContentsMargins(0, 0, 0, 0) fileListLayout.setSpacing(0) fileListLayout.addWidget(self.fileSearch) fileListLayout.addWidget(self.fileListWidget) self.file_dock = QtWidgets.QDockWidget(self.tr(u"File List"), self) self.file_dock.setObjectName(u"Files") fileListWidget = QtWidgets.QWidget() fileListWidget.setLayout(fileListLayout) self.file_dock.setWidget(fileListWidget) self.vis_dock = QtWidgets.QDockWidget( self.tr(u"Visualization Options"), self) self.vis_dock.setObjectName(u"Visualization Options") self.vis_widget = QtWidgets.QWidget() self.vis_dock.setWidget(self.vis_widget) self.zoomWidget = ZoomWidget() self.setAcceptDrops(True) self.canvas = self.labelList.canvas = Canvas( epsilon=self._config["epsilon"], double_click=self._config["canvas"]["double_click"], num_backups=self._config["canvas"]["num_backups"], ) self.canvas.zoomRequest.connect(self.zoomRequest) scrollArea = QtWidgets.QScrollArea() scrollArea.setWidget(self.canvas) scrollArea.setWidgetResizable(True) self.scrollBars = { Qt.Orientation.Vertical: scrollArea.verticalScrollBar(), Qt.Orientation.Horizontal: scrollArea.horizontalScrollBar(), Qt.Orientation.Horizontal.value: scrollArea.horizontalScrollBar(), Qt.Orientation.Vertical.value: scrollArea.verticalScrollBar(), } self.canvas.scrollRequest.connect(self.scrollRequest) self.canvas.newShape.connect(self.newShape) self.canvas.shapeMoved.connect(self.setDirty) self.canvas.selectionChanged.connect(self.shapeSelectionChanged) self.canvas.drawingPolygon.connect(self.toggleDrawingSensitive) self.canvas.edgeSelected.connect(self.canvasShapeEdgeSelected) self.canvas.APPrefresh.connect(self.refresh_image_MODE) # adding toolbars of SAM and and related slots self.addSamControls() # Canvas SAM slots self.canvas.pointAdded.connect(self.run_sam_model) self.canvas.samFinish.connect(self.sam_finish_annotation_button_clicked) # SAM predictor self.sam_predictor = None self.current_sam_shape = None self.SAM_SHAPES_IN_IMAGE = [] self.sam_last_mode = "rectangle" self.setCentralWidget(scrollArea) # for Export self.target_directory = "" self.save_path = "" self.global_listObj = [] # for merge self.multi_model_flag = False # adding toolbars of video mode and and related slots self.addVideoControls() # for video annotation and tracking self.frame_time = 0 self.FRAMES_TO_SKIP = 30 self.TRACK_ASSIGNED_OBJECTS_ONLY = False self.TrackingMode = False self.current_annotation_mode = "" self.CURRENT_ANNOATAION_FLAGS = {"traj": False, "bbox": True, "id": True, "class": True, "mask": True, "polygons": True, "conf": True} self.CURRENT_ANNOATAION_TRAJECTORIES = {'length': 30, 'alpha': 0.70} self.CURRENT_SHAPES_IN_IMG = [] self.featuresOptions = {'deleteDefault': "this frame only", 'interpolationDefMethod': "linear", 'interpolationDefType': "all", 'interpolationOverwrite': 
False, 'EditDefault': "Edit only this frame"} self.key_frames = {} self.id_frames_rec = {} self.copiedShapes = [] self.INDEX_OF_CURRENT_FRAME = 1 self.interrupted = False self.minID = -2 self.maxID = 0 for dock in ["label_dock", "shape_dock", "file_dock", "vis_dock"]: if self._config[dock]["closable"]: getattr(self, dock).setFeatures(QtWidgets.QDockWidget.DockWidgetFeature.DockWidgetClosable) if self._config[dock]["floatable"]: getattr(self, dock).setFeatures(QtWidgets.QDockWidget.DockWidgetFeature.DockWidgetFloatable) if self._config[dock]["movable"]: getattr(self, dock).setFeatures(QtWidgets.QDockWidget.DockWidgetFeature.DockWidgetMovable) if self._config[dock]["show"] is False: getattr(self, dock).setVisible(False) self.addDockWidget(Qt.DockWidgetArea.RightDockWidgetArea, self.label_dock) self.addDockWidget(Qt.DockWidgetArea.RightDockWidgetArea, self.shape_dock) self.addDockWidget(Qt.DockWidgetArea.RightDockWidgetArea, self.file_dock) self.addDockWidget(Qt.DockWidgetArea.RightDockWidgetArea, self.vis_dock) # Actions action = functools.partial(utils.newAction, self) shortcuts = self._config["shortcuts"] quit = action( self.tr("&Quit"), self.close, shortcuts["quit"], "quit", self.tr("Quit application"), ) open_ = action( self.tr("&Open Image"), self.openFile, shortcuts["open"], "open", self.tr(f"Open image or label file ({str(shortcuts['open'])})"), ) opendir = action( self.tr("&Open Dir"), self.openDirDialog, shortcuts["open_dir"], "opendir", self.tr(f"Open Dir ({str(shortcuts['open_dir'])})"), ) save = action( self.tr("&Save"), self.saveFile, shortcuts["save"], "save", self.tr(f"Save labels to file ({str(shortcuts['save'])})"), enabled=False, ) export = action( self.tr("&Export"), self.exportData, shortcuts["export"], "export", self.tr( f"Export annotations to COCO format ({str(shortcuts['export'])})"), enabled=False, ) modelExplorer = action( self.tr("&Model Explorer"), self.model_explorer, None, "checklist", self.tr(u"Model Explorer"), ) saveAs = action( self.tr("&Save As"), self.saveFileAs, shortcuts["save_as"], "save-as", self.tr("Save labels to a different file"), enabled=False, ) deleteFile = action( self.tr("&Delete File"), self.deleteFile, shortcuts["delete_file"], "delete", self.tr("Delete current label file"), enabled=False, ) changeOutputDir = action( self.tr("&Change Output Dir"), slot=self.changeOutputDirDialog, shortcut=shortcuts["save_to"], icon="open", tip=self.tr(u"Change where annotations are loaded/saved"), ) saveAuto = action( text=self.tr("Save &Automatically"), slot=lambda x: self.actions.saveAuto.setChecked(x), icon="save", tip=self.tr("Save automatically"), checkable=True, enabled=True, ) saveAuto.setChecked(self._config["auto_save"]) saveWithImageData = action( text="Save With Image Data", slot=self.enableSaveImageWithData, tip="Save image data in label file", checkable=True, checked=self._config["store_data"], ) close = action( "&Close", self.closeFile, shortcuts["close"], "close", "Close current file", ) toggle_keep_prev_mode = action( self.tr("Keep Previous Annotation"), self.toggleKeepPrevMode, shortcuts["toggle_keep_prev_mode"], None, self.tr('Toggle "keep pevious annotation" mode'), checkable=True, ) toggle_keep_prev_mode.setChecked(self._config["keep_prev"]) createMode = action( self.tr("Create Polygons"), self.setCreateMode, shortcuts["create_polygon"], "objects", self.tr("Start drawing polygons"), enabled=False, ) editMode = action( self.tr("Edit Polygons"), self.setEditMode, shortcuts["edit_polygon"], "edit", self.tr("Move and edit the selected 
polygons"), enabled=False, ) delete = action( self.tr("Delete Polygons"), self.deleteSelectedShape, shortcuts["delete_polygon"], "close", self.tr("Delete the selected polygons"), enabled=False, ) copy = action( self.tr("Duplicate Polygons"), self.copySelectedShape, shortcuts["duplicate_polygon"], "copy", self.tr("Create a duplicate of the selected polygons"), enabled=False, ) undoLastPoint = action( self.tr("Undo last point"), self.canvas.undoLastPoint, shortcuts["undo_last_point"], "undo", self.tr("Undo last drawn point"), enabled=False, ) addPointToEdge = action( text=self.tr("Add Point to Edge"), slot=self.canvas.addPointToEdge, shortcut=shortcuts["add_point_to_edge"], icon="add_point", tip=self.tr("Add point to the nearest edge"), enabled=False, ) removePoint = action( text="Remove Selected Point", slot=self.removeSelectedPoint, icon="edit", tip="Remove selected point from polygon", enabled=False, ) undo = action( self.tr("Undo"), self.undoShapeEdit, shortcuts["undo"], "undo", self.tr("Undo last add and edit of shape"), enabled=False, ) hideAll = action( self.tr("&Hide\nPolygons"), functools.partial(self.togglePolygons, False), icon="eye", tip=self.tr("Hide all polygons"), enabled=False, ) showAll = action( self.tr("&Show\nPolygons"), functools.partial(self.togglePolygons, True), icon="eye", tip=self.tr("Show all polygons"), enabled=False, ) zoom = QtWidgets.QWidgetAction(self) zoom.setDefaultWidget(self.zoomWidget) self.zoomWidget.setWhatsThis( self.tr( "Zoom in or out of the image. Also accessible with " "{} and {} from the canvas." ).format( utils.fmtShortcut( "{},{}".format(shortcuts["zoom_in"], shortcuts["zoom_out"]) ), utils.fmtShortcut(self.tr("Ctrl+Wheel")), ) ) self.zoomWidget.setEnabled(False) zoomIn = action( self.tr("Zoom &In"), functools.partial(self.addZoom, 1.1), shortcuts["zoom_in"], "zoom-in", self.tr("Increase zoom level"), enabled=False, ) zoomOut = action( self.tr("&Zoom Out"), functools.partial(self.addZoom, 0.9), shortcuts["zoom_out"], "zoom-out", self.tr("Decrease zoom level"), enabled=False, ) zoomOrg = action( self.tr("&Original size"), functools.partial(self.setZoom, 100), shortcuts["zoom_to_original"], "zoom", self.tr("Zoom to original size"), enabled=False, ) fitWindow = action( self.tr("&Fit Window"), self.setFitWindow, shortcuts["fit_window"], "fit-window", self.tr("Zoom follows window size"), checkable=True, enabled=False, ) fitWidth = action( self.tr("Fit &Width"), self.setFitWidth, shortcuts["fit_width"], "fit-width", self.tr("Zoom follows window width"), checkable=True, enabled=False, ) brightnessContrast = action( "&Brightness Contrast", self.brightnessContrast, None, "color", "Adjust brightness and contrast", enabled=False, ) show_cross_line = action( self.tr("&Toggle Cross Line"), self.enable_show_cross_line, tip=self.tr("cross line for mouse position"), icon="cartesian", checkable=True, checked=self._config["show_cross_line"], enabled=True, ) # Group zoom controls into a list for easier toggling. zoomActions = ( self.zoomWidget, zoomIn, zoomOut, zoomOrg, fitWindow, fitWidth, ) self.zoomMode = self.FIT_WINDOW fitWindow.setChecked(True) self.scalers = { self.FIT_WINDOW: self.scaleFitWindow, self.FIT_WIDTH: self.scaleFitWidth, # Set to one to scale to 100% when loading files. 
self.MANUAL_ZOOM: lambda: 1, } edit = action( self.tr("Edit &Label"), self.editLabel, shortcuts["edit_label"], "label", self.tr("Modify the label of the selected polygon"), enabled=False, ) enhance = action( self.tr("&Enhace Polygons"), self.sam_enhance_annotation_button_clicked, shortcuts["SAM_enhance"], "SAM", self.tr("Enhance the selected polygon with AI"), enabled=True, ) interpolate = action( self.tr("&Interpolation Tracking"), self.interpolateMENU, shortcuts["interpolate"], "tracking", self.tr("Interpolate the selected polygon between to frames to Track it"), enabled=True, ) mark_as_key = action( self.tr("&Mark as key"), self.mark_as_key, shortcuts["mark_as_key"], "mark", self.tr("Mark this frame as KEY for interpolation"), enabled=True, ) remove_all_keyframes = action( self.tr("&Remove all keyframes"), self.remove_all_keyframes, None, "mark", self.tr("Remove all keyframes"), enabled=True, ) scale = action( self.tr("&Scale"), self.scaleMENU, shortcuts["scale"], "resize", self.tr("Scale the selected polygon"), enabled=True, ) copyShapes = action( self.tr("&Copy"), self.ctrlCopy, shortcuts["copy"], "copy", self.tr("Copy selected polygons"), enabled=True, ) pasteShapes = action( self.tr("&Paste"), self.ctrlPaste, shortcuts["paste"], "paste", self.tr("paste copied polygons"), enabled=True, ) update_curr_frame = action( self.tr("&Update current frame"), self.update_current_frame_annotation_button_clicked, None, "done", self.tr("Update frame"), enabled=True, ) ignore_changes = action( self.tr("&Ignore changes"), self.main_video_frames_slider_changed, shortcuts["ignore_updates"], "delete", self.tr("Ignore unsaved changes"), enabled=True, ) fill_drawing = action( self.tr("Fill Drawing Polygon"), self.canvas.setFillDrawing, None, "color", self.tr("Fill polygon while drawing"), checkable=True, enabled=True, ) fill_drawing.trigger() # intelligence actions annotate_one_action = action( self.tr("Run Model on Current Image"), self.annotate_one, None, "open", self.tr("Run Model on Current Image") ) annotate_batch_action = action( self.tr("Run Model on All Images"), self.annotate_batch, None, "file", self.tr("Run Model on All Images") ) set_conf_threshold = action( self.tr("Confidence Threshold"), self.setConfThreshold, None, "tune", self.tr("Confidence Threshold") ) set_iou_threshold = action( self.tr("IOU Threshold (NMS)"), self.setIOUThreshold, None, "iou", self.tr("IOU Threshold (Non Maximum Suppression)") ) select_classes = action( self.tr("Select Classes"), self.selectClasses, None, "checklist", self.tr("Select Classes to be Annotated") ) merge_segmentation_models = action( self.tr("Merge Segmentation Models"), self.mergeSegModels, None, "merge", self.tr("Merge Segmentation Models") ) runtime_data = action( self.tr("Show Runtime Data"), runtime_data_UI.PopUp, None, "runtime", self.tr("Show Runtime Data") ) git_hub = action( self.tr("GitHub Repository"), links.open_git_hub, None, "github", self.tr("GitHub Repository") ) feedback = action( self.tr("Feedback"), feedback_UI.PopUp, None, "feedback", self.tr("Feedback") ) license = action( self.tr("license"), links.open_license, None, "license", self.tr("license") ) user_guide = action( self.tr("User Guide"), links.open_guide, None, "guide", self.tr("User Guide") ) check_updates = action( self.tr("Check for Updates"), check_updates_UI.PopUp, None, "info", self.tr("Check for Updates") ) preferences = action( self.tr("Preferences"), preferences_UI.PopUp, None, "settings", self.tr("Preferences") ) shortcut_selector = action( self.tr("Shortcuts"), 
shortcut_selector_UI.PopUp, None, "shortcuts", self.tr("Shortcuts") ) sam = action( self.tr("Toggle SAM Toolbar"), self.Segment_anything, None, "SAM", self.tr("Toggle SAM Toolbar") ) openVideo = action( self.tr("Open &Video"), self.openVideo, shortcuts["open_video"], "video", self.tr(f"Open a video file ({shortcuts['open_video']})"), ) openVideoFrames = action( self.tr("Open Video as Frames"), self.openVideoFrames, shortcuts["open_video_frames"], "frames", self.tr( f"Open Video as Frames ({shortcuts['open_video_frames']})"), ) # Lavel list context menu. labelmenu = QtWidgets.QMenu() utils.addActions(labelmenu, (edit, delete)) self.labelList.setContextMenuPolicy(Qt.ContextMenuPolicy.CustomContextMenu) self.labelList.customContextMenuRequested.connect( self.popLabelListMenu ) # Store actions for further handling. self.actions = utils.struct( saveAuto=saveAuto, saveWithImageData=saveWithImageData, changeOutputDir=changeOutputDir, save=save, saveAs=saveAs, open=open_, close=close, deleteFile=deleteFile, toggleKeepPrevMode=toggle_keep_prev_mode, delete=delete, edit=edit, copy=copy, undoLastPoint=undoLastPoint, undo=undo, addPointToEdge=addPointToEdge, removePoint=removePoint, createMode=createMode, editMode=editMode, zoom=zoom, zoomIn=zoomIn, zoomOut=zoomOut, zoomOrg=zoomOrg, fitWindow=fitWindow, fitWidth=fitWidth, brightnessContrast=brightnessContrast, show_cross_line=show_cross_line, zoomActions=zoomActions, export=export, openVideo=openVideo, openVideoFrames=openVideoFrames, fileMenuActions=(open_, opendir, save, saveAs, close, quit), modelExplorer=modelExplorer, runtime_data=runtime_data, tool=(), # XXX: need to add some actions here to activate the shortcut editMenu=( edit, copy, delete, None, undo, undoLastPoint, None, addPointToEdge, ), # menu shown at right click menu=( createMode, editMode, edit, enhance, interpolate, mark_as_key, remove_all_keyframes, scale, copyShapes, pasteShapes, copy, delete, undo, undoLastPoint, addPointToEdge, removePoint, update_curr_frame, ignore_changes ), onLoadActive=( close, createMode, editMode, brightnessContrast, ), onShapesPresent=(saveAs, hideAll, showAll), ) self.canvas.vertexSelected.connect(self.actions.removePoint.setEnabled) self.menus = utils.struct( file=self.menu(self.tr("&File")), edit=self.menu(self.tr("&Edit")), view=self.menu(self.tr("&View")), intelligence=self.menu(self.tr("&Auto Annotation")), model_selection=self.menu(self.tr("&Model Selection")), options=self.menu(self.tr("&Options")), help=self.menu(self.tr("&Help")), recentFiles=QtWidgets.QMenu(self.tr("Open &Recent")), saved_models=QtWidgets.QMenu(self.tr("Select Segmentation model")), tracking_models=QtWidgets.QMenu(self.tr("Select Tracking model")), labelList=labelmenu, certain_area=QtWidgets.QMenu(self.tr("Select Certain Area")), ui_elements=QtWidgets.QMenu(self.tr("&Show UI Elements")), zoom_options=QtWidgets.QMenu(self.tr("&Zoom Options")), ) utils.addActions( self.menus.file, ( open_, opendir, openVideo, openVideoFrames, None, save, saveAs, export, None, close, quit, ), ) utils.addActions(self.menus.intelligence, (annotate_one_action, annotate_batch_action, ) ) # View menu and its submenus self.menus.ui_elements.setIcon(QtGui.QIcon("labelme/icons/UI.png")) utils.addActions(self.menus.ui_elements, ( self.vis_dock.toggleViewAction(), self.label_dock.toggleViewAction(), self.shape_dock.toggleViewAction(), self.file_dock.toggleViewAction(), ) ) self.menus.zoom_options.setIcon(QtGui.QIcon("labelme/icons/zoom.png")) utils.addActions(self.menus.zoom_options, ( zoomIn, zoomOut, 
zoomOrg, None, fitWindow, fitWidth, ) ) utils.addActions( self.menus.view, (sam, self.menus.ui_elements, None, hideAll, showAll, None, self.menus.zoom_options, None, show_cross_line, ), ) # Model selection menu self.menus.saved_models.setIcon( QtGui.QIcon("labelme/icons/brain.png")) self.menus.tracking_models.setIcon( QtGui.QIcon("labelme/icons/tracking.png")) self.menus.certain_area.setIcon( QtGui.QIcon("labelme/icons/polygon.png")) utils.addActions( self.menus.model_selection, ( self.menus.saved_models, merge_segmentation_models, None, self.menus.tracking_models, None, modelExplorer, ), ) # Options menu utils.addActions( self.menus.options, ( set_conf_threshold, set_iou_threshold, self.menus.certain_area, None, select_classes, ), ) # Help menu utils.addActions( self.menus.help, ( user_guide, preferences, shortcut_selector, None, git_hub, feedback, None, runtime_data, None, license, check_updates ), ) self.menus.file.aboutToShow.connect(self.updateFileMenu) self.menus.file.aboutToShow.connect(self.update_models_menu) # Custom context menu for the canvas widget: utils.addActions(self.canvas.menus[0], self.actions.menu) utils.addActions( self.canvas.menus[1], ( action("&Copy here", self.copyShape), action("&Move here", self.moveShape), ), ) self.tools = self.toolbar("Tools") # Menu buttons on Left self.actions.tool = ( open_, opendir, openVideo, None, save, export, None, createMode, editMode, edit, None, delete, undo, None, ) self.statusBar().showMessage(self.tr("%s started.") % __appname__) self.statusBar().show() if output_file is not None and self._config["auto_save"]: logger.warn( "If `auto_save` argument is True, `output_file` argument " "is ignored and output filename is automatically " "set as IMAGE_BASENAME.json." ) self.output_file = output_file self.output_dir = output_dir # Application state. self.image = QtGui.QImage() self.imagePath = None self.recentFiles = [] self.maxRecent = 7 self.otherData = None self.zoom_level = 100 self.fit_window = False self.zoom_values = {} # key=filename, value=(zoom_mode, zoom_value) self.brightnessContrast_values = {} self.scroll_values = { Qt.Orientation.Horizontal: {}, Qt.Orientation.Vertical: {}, Qt.Orientation.Horizontal.value: {}, Qt.Orientation.Vertical.value: {}, } # key=filename, value=scroll_value if filename is not None and osp.isdir(filename): self.importDirImages(filename, load=False) else: self.filename = filename if config["file_search"]: self.fileSearch.setText(config["file_search"]) self.fileSearchChanged() # XXX: Could be completely declarative. # Restore application settings. self.settings = QtCore.QSettings("labelme", "labelme") # FIXME: QSettings.value can return None on PyQt4 self.recentFiles = self.settings.value("recentFiles", []) or [] size = self.settings.value("window/size", QtCore.QSize(600, 500)) position = self.settings.value("window/position", QtCore.QPoint(0, 0)) self.resize(size) self.move(position) # or simply: # self.restoreGeometry(settings['window/geometry'] self.restoreState( self.settings.value("window/state", QtCore.QByteArray()) ) # Populate the File menu dynamically. self.updateFileMenu() self.update_models_menu() # Since loading the file may take some time, # make sure it runs in the background. 
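
        # Concretely, queueEvent (defined further down in this class) is just
        #     QtCore.QTimer.singleShot(0, function)
        # so queueing functools.partial(self.loadFile, self.filename) posts the
        # heavy file load onto the Qt event loop: the main window gets shown
        # first, and the load runs as soon as the loop is idle.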
if self.filename is not None: self.queueEvent(functools.partial(self.loadFile, self.filename)) # Callbacks: self.zoomWidget.valueChanged.connect(self.paintCanvas) self.populateModeActions() self.right_click_menu() QtGui.QShortcut(QtGui.QKeySequence(self._config['shortcuts']['stop']), self).activated.connect(self.Escape_clicked) def menu(self, title, actions=None): menu = self.menuBar().addMenu(title) if actions: utils.addActions(menu, actions) return menu def toolbar(self, title, actions=None): toolbar = ToolBar(title) toolbar.setObjectName("%sToolBar" % title) # toolbar.setOrientation(Qt.Orientation.Vertical) toolbar.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextUnderIcon) if actions: utils.addActions(toolbar, actions) self.addToolBar(Qt.ToolBarArea.LeftToolBarArea, toolbar) return toolbar # Support Functions def noShapes(self): return not len(self.labelList) def populateModeActions(self): tool, menu = self.actions.tool, self.actions.menu self.tools.clear() utils.addActions(self.tools, tool) self.canvas.menus[0].clear() utils.addActions(self.canvas.menus[0], menu) self.menus.edit.clear() actions = ( self.actions.editMode, ) utils.addActions(self.menus.edit, actions + self.actions.editMenu) def setDirty(self): # Even if we autosave the file, we keep the ability to undo self.actions.undo.setEnabled(self.canvas.isShapeRestorable) if self._config["auto_save"] or self.actions.saveAuto.isChecked(): if self.output_dir: label_file_without_path = osp.basename(label_file) label_file = osp.join(self.output_dir, label_file_without_path) if os.path.isdir(label_file): os.remove(label_file) self.saveLabels(label_file) return self.dirty = True self.actions.save.setEnabled(True) title = __appname__ if self.filename is not None: title = "{} - {}*".format(title, self.filename) self.setWindowTitle(title) def setClean(self): self.dirty = False self.actions.save.setEnabled(False) self.actions.createMode.setEnabled(True) title = __appname__ if self.filename is not None: title = "{} - {}".format(title, self.filename) self.setWindowTitle(title) if self.hasLabelFile(): self.actions.deleteFile.setEnabled(True) else: self.actions.deleteFile.setEnabled(False) def toggleActions(self, value=True): """Enable/Disable widgets which depend on an opened image.""" for z in self.actions.zoomActions: z.setEnabled(value) for action in self.actions.onLoadActive: action.setEnabled(value) def canvasShapeEdgeSelected(self, selected, shape): self.actions.addPointToEdge.setEnabled( selected and shape and shape.canAddPoint() ) def queueEvent(self, function): QtCore.QTimer.singleShot(0, function) def status(self, message, delay=5000): self.statusBar().showMessage(message, delay) def resetState(self): self.labelList.clear() self.filename = None self.imagePath = None self.imageData = None self.CURRENT_FRAME_IMAGE = None self.labelFile = None self.otherData = None self.canvas.resetState() def currentItem(self): items = self.labelList.selectedItems() if items: return items[0] return None def addRecentFile(self, filename): if filename in self.recentFiles: self.recentFiles.remove(filename) elif len(self.recentFiles) >= self.maxRecent: self.recentFiles.pop() self.recentFiles.insert(0, filename) # Callbacks def Escape_clicked(self): """ Summary: This function is called when the user presses the escape key. It resets the SAM toolbar and the canvas. It also interrupts the current annotation process like (tracking, interpolation, etc.) 
""" self.interrupted = True self.sam_reset_button_clicked() if self.canvas.tracking_area == "drawing": self.certain_area_clicked(1) def undoShapeEdit(self): self.canvas.restoreShape() self.labelList.clear() self.loadShapes(self.canvas.shapes) self.actions.undo.setEnabled(self.canvas.isShapeRestorable) def toggleDrawingSensitive(self, drawing=True): """Toggle drawing sensitive. In the middle of drawing, toggling between modes should be disabled. """ self.actions.editMode.setEnabled(not drawing) self.actions.undoLastPoint.setEnabled(drawing) self.actions.undo.setEnabled(not drawing) self.actions.delete.setEnabled(not drawing) def toggleDrawMode(self, edit=True, createMode="polygon"): self.canvas.setEditing(edit) self.canvas.createMode = createMode if edit: self.actions.createMode.setEnabled(True) else: if createMode == "polygon": self.actions.createMode.setEnabled(False) else: self.actions.createMode.setEnabled(True) self.actions.editMode.setEnabled(not edit) def setEditMode(self): self.turnOFF_SAM() try: x = self.CURRENT_VIDEO_PATH except: self.toggleDrawMode(True) return self.update_current_frame_annotation() self.toggleDrawMode(True) def updateFileMenu(self): current = self.filename def exists(filename): return osp.exists(str(filename)) menu = self.menus.recentFiles menu.clear() files = [f for f in self.recentFiles if f != current and exists(f)] for i, f in enumerate(files): icon = utils.newIcon("brain") action = QtGui.QAction( icon, "&%d %s" % (i + 1, QtCore.QFileInfo(f).fileName()), self ) action.triggered.connect(functools.partial(self.loadRecent, f)) menu.addAction(action) def update_models_menu(self): menu = self.menus.saved_models menu.clear() with open("saved_models.json") as json_file: data = json.load(json_file) # loop through all the models i = 0 for model_name in list(data.keys()): if i >= 6: break icon = utils.newIcon("brain") action = QtGui.QAction( icon, "&%d %s" % (i + 1, model_name), self) action.triggered.connect(functools.partial( self.change_curr_model, model_name)) menu.addAction(action) i += 1 self.add_tracking_models_menu() self.add_certain_area_menu() def add_tracking_models_menu(self): menu2 = self.menus.tracking_models menu2.clear() icon = utils.newIcon("tracking") action = QtGui.QAction( icon, "1 Byte track (DEFAULT)", self) action.triggered.connect( lambda: self.update_tracking_method('bytetrack')) menu2.addAction(action) icon = utils.newIcon("tracking") action = QtGui.QAction( icon, "2 Strong SORT (lowest id switch)", self) action.triggered.connect( lambda: self.update_tracking_method('strongsort')) menu2.addAction(action) icon = utils.newIcon("tracking") action = QtGui.QAction( icon, "3 Deep SORT", self) action.triggered.connect( lambda: self.update_tracking_method('deepocsort')) menu2.addAction(action) icon = utils.newIcon("tracking") action = QtGui.QAction( icon, "4 OC SORT", self) action.triggered.connect(lambda: self.update_tracking_method('ocsort')) menu2.addAction(action) icon = utils.newIcon("tracking") action = QtGui.QAction( icon, "5 BoT SORT", self) action.triggered.connect( lambda: self.update_tracking_method('botsort')) menu2.addAction(action) def add_certain_area_menu(self): menu3 = self.menus.certain_area menu3.clear() icon = utils.newIcon("polygon") action = QtGui.QAction( icon, "Select Certain Area", self) action.triggered.connect( lambda: self.certain_area_clicked(1)) menu3.addAction(action) icon = utils.newIcon("rectangle") action = QtGui.QAction( icon, "Cancel Area", self) action.triggered.connect( lambda: self.certain_area_clicked(0)) 
menu3.addAction(action) def update_tracking_method(self, method='bytetrack'): self.waitWindow( visible=True, text=f'Please Wait.\n{method} is Loading...') self.tracking_method = method self.tracking_config = ROOT / 'trackers' / \ method / 'configs' / (method + '.yaml') with torch.no_grad(): device = select_device('') print( f'tracking method {self.tracking_method} , config {self.tracking_config} , reid {reid_weights} , device {device} , half {False}') self.tracker = create_tracker( self.tracking_method, self.tracking_config, reid_weights, device, False) if hasattr(self.tracker, 'model'): if hasattr(self.tracker.model, 'warmup'): self.tracker.model.warmup() self.waitWindow() print(f'Changed tracking method to {method}') def popLabelListMenu(self, point): self.menus.labelList.exec(self.labelList.mapToGlobal(point)) def validateLabel(self, label): # no validation if self._config["validate_label"] is None: return True for i in range(self.uniqLabelList.count()): label_i = self.uniqLabelList.item(i).data(Qt.ItemDataRole.UserRole) if self._config["validate_label"] in ["exact"]: if label_i == label: return True return False def setCreateMode(self): self.turnON_SAM() self.toggleDrawMode(False, createMode="polygon") return def editLabel(self, item=None): if self.current_annotation_mode == 'video': self.update_current_frame_annotation() if item and not isinstance(item, LabelListWidgetItem): raise TypeError("item must be LabelListWidgetItem type") if not self.canvas.editing(): return if not item: item = self.currentItem() if item is None: return shape = item.shape() if shape is None: return old_text, old_flags, old_group_id, old_content = self.labelDialog.popUp( text=shape.label, flags=shape.flags, group_id=shape.group_id, content=shape.content, skip_flag=True ) text, flags, new_group_id, content = self.labelDialog.popUp( text=shape.label, flags=shape.flags, group_id=shape.group_id, content=shape.content ) if text is None: return if not self.validateLabel(text): self.errorMessage( self.tr("Invalid label"), self.tr("Invalid label '{}' with validation type '{}'").format( text, self._config["validate_label"] ), ) return shape.label = text shape.flags = flags shape.group_id = new_group_id shape.content = str(content) # if img or dir -> do smth then return if self.current_annotation_mode == 'img' or self.current_annotation_mode == 'dir': item.setText(f'{shape.label}') self.setDirty() if not self.uniqLabelList.findItemsByLabel(shape.label): item = QtWidgets.QListWidgetItem() item.setData(Qt.ItemDataRole.UserRole, shape.label) self.uniqLabelList.addItem(item) self.refresh_image_MODE() return # now we are in video mode if shape.group_id is None: item.setText(shape.label) else: idChanged = old_group_id != new_group_id result, self.featuresOptions, only_this_frame, duplicates = editLabel_idChanged_UI( self.featuresOptions, old_group_id, new_group_id, self.id_frames_rec, self.INDEX_OF_CURRENT_FRAME) if duplicates or result != QtWidgets.QDialog.DialogCode.Accepted: shape.label = old_text shape.flags = old_flags shape.content = old_content shape.group_id = old_group_id return self.minID = min(self.minID, new_group_id - 1) listObj = self.load_objects_from_json__orjson() self.id_frames_rec, self.CURRENT_ANNOATAION_TRAJECTORIES, listObj = editLabel_handle_data( currFrame=self.INDEX_OF_CURRENT_FRAME, listObj=listObj, trajectories=self.CURRENT_ANNOATAION_TRAJECTORIES, id_frames_rec=self.id_frames_rec, idChanged=idChanged, only_this_frame=only_this_frame, shape=shape, old_group_id=old_group_id, 
new_group_id=new_group_id,) self.load_objects_to_json__orjson(listObj) self.main_video_frames_slider_changed() def mark_as_key(self): """ Summary: This function is called when the user presses the "Mark as Key" button. It marks the selected shape as a key frame. """ try: self.update_current_frame_annotation() id = self.canvas.selectedShapes[0].group_id try: if self.INDEX_OF_CURRENT_FRAME not in self.key_frames['id_' + str(id)]: self.key_frames['id_' + str(id)].add(self.INDEX_OF_CURRENT_FRAME) else: res = MsgBox.OKmsgBox( "Caution", f"Frame {self.INDEX_OF_CURRENT_FRAME} is already a key frame for ID {id}.\nDo you want to remove it?", "warning", turnResult=True) if res == QtWidgets.QMessageBox.StandardButton.Ok: self.key_frames['id_' + str(id)].remove(self.INDEX_OF_CURRENT_FRAME) else: return except: self.key_frames['id_' + str(id)] = set() self.key_frames['id_' + str(id)].add(self.INDEX_OF_CURRENT_FRAME) self.main_video_frames_slider_changed() except Exception as e: MsgBox.OKmsgBox("Error", f"Error: {e}", "critical") def remove_all_keyframes(self): try: self.update_current_frame_annotation() id = self.canvas.selectedShapes[0].group_id self.key_frames['id_' + str(id)] = set() except: pass def rec_frame_for_id(self, id, frame, type_='add'): """ Summary: To store the frames in which the object with the given id is present. Args: id (int): The id of the object. frame (int): The frame number. type_ (str, optional): 'add' or 'remove'. Defaults to 'add'. 'add' to add the frame to the list of frames in which the object is present. 'remove' to remove the frame from the list of frames in which the object is present. Returns: None """ if type_ == 'add': try: self.id_frames_rec['id_' + str(id)].add(frame) except: self.id_frames_rec['id_' + str(id)] = set() self.id_frames_rec['id_' + str(id)].add(frame) else: try: self.id_frames_rec['id_' + str(id)].remove(frame) except: pass def interpolateMENU(self, item=None): try: if len(self.canvas.selectedShapes) == 0: mb = QtWidgets.QMessageBox msg = self.tr("Interpolate all IDs?\n") answer = mb.warning(self, self.tr( "Attention"), msg, mb.StandardButton.Yes | mb.StandardButton.No) if answer != mb.StandardButton.Yes: return else: self.update_current_frame_annotation() keys = list(self.id_frames_rec.keys()) idsORG = [int(keys[i][3:]) for i in range(len(keys))] else: self.update_current_frame_annotation() idsORG = [shape.group_id for shape in self.canvas.selectedShapes] id = self.canvas.selectedShapes[0].group_id result, self.featuresOptions = interpolation_UI.PopUp(self.featuresOptions) if result != QtWidgets.QDialog.DialogCode.Accepted: return with_linear = True if self.featuresOptions['interpolationDefMethod'] == 'linear' else False with_sam = True if self.featuresOptions['interpolationDefMethod'] == 'SAM' else False with_keyframes = True if self.featuresOptions['interpolationDefType'] == 'key' else False if with_keyframes: allAccepted, allRejected, ids = mathOps.checkKeyFrames( idsORG, self.key_frames) if not allAccepted: if allRejected: MsgBox.OKmsgBox("Key Frames Error", f"All of the selected IDs have no KEY frames.\n ie. less than 2 key frames\n The interpolation is NOT performed.") return else: resutl = MsgBox.OKmsgBox("Key Frames Error", f"Some of the selected IDs have no KEY frames.\n ie. 
less than 2 key frames\n The interpolation is performed only for the IDs with KEY frames.\nIDs: {ids}.", "info", turnResult=True) if resutl != QtWidgets.QMessageBox.StandardButton.Ok: return else: ids = idsORG self.interrupted = False if with_sam: self.interpolate_with_sam(ids, with_keyframes) else: for id in ids: QtWidgets.QApplication.processEvents() if self.interrupted: self.interrupted = False break self.interpolate(id=id, only_edited=with_keyframes) self.waitWindow() except Exception as e: MsgBox.OKmsgBox("Error", f"Error: {e}", "critical") def interpolate(self, id, only_edited=False): """ Summary: This function is called when the user presses the "Interpolate" button. It interpolates the object with the given id. Args: id (int): The id of the object. only_edited (bool, optional): True to interpolate using only the key frames. Defaults to False. """ self.waitWindow( visible=True, text=f'Please Wait.\nID {id} is being interpolated...') listObj = self.load_objects_from_json__orjson() if only_edited: try: FRAMES = list(self.key_frames['id_' + str(id)]) except: return else: FRAMES = list(self.id_frames_rec['id_' + str(id)]) if len( self.id_frames_rec['id_' + str(id)]) > 1 else [-1] first_frame_idx = min(FRAMES) last_frame_idx = max(FRAMES) if (first_frame_idx >= last_frame_idx): return records = [None for i in range(first_frame_idx - 1, last_frame_idx, 1)] for frame in range(first_frame_idx, last_frame_idx + 1, 1): listobjframe = listObj[frame - 1]['frame_idx'] frameobjects = listObj[frame - 1]['frame_data'] for object_ in frameobjects: if (object_['tracker_id'] == id): if ((not only_edited) or (listobjframe in FRAMES)): records[frame - first_frame_idx] = copy.deepcopy(object_) break baseObject = None baseObjectFrame = None nextObject = None nextObjectFrame = None for frame in range(first_frame_idx, last_frame_idx, 1): QtWidgets.QApplication.processEvents() if self.interrupted: break listobjframe = listObj[frame - 1]['frame_idx'] frameobjects = listObj[frame - 1]['frame_data'] # if object is present in this frame, then it is base object and we calculate next object if (records[frame - first_frame_idx] is not None): # assign it as base object baseObject = copy.deepcopy(records[frame - first_frame_idx]) baseObjectFrame = frame # find next object for j in range(frame + 1, last_frame_idx + 1, 1): if (records[j - first_frame_idx] != None): nextObject = copy.deepcopy( records[j - first_frame_idx]) nextObjectFrame = j break # job done, go to next frame continue # if only_edited is true and the frame is not key, then we remove the object from the frame to be interpolated if (only_edited and (frame not in FRAMES)): for object_ in frameobjects: if (object_['tracker_id'] == id): listObj[frame - 1]['frame_data'].remove(object_) break # if object is not present in this frame, then we calculate the object for this frame cur = mathOps.getInterpolated(baseObject=baseObject, baseObjectFrame=baseObjectFrame, nextObject=nextObject, nextObjectFrame=nextObjectFrame, curFrame=frame,) listObj[frame - 1]['frame_data'].append(cur) self.rec_frame_for_id(id, frame) self.load_objects_to_json__orjson(listObj) frames = range(first_frame_idx - 1, last_frame_idx, 1) self.calculate_trajectories(frames) self.main_video_frames_slider_changed() def interpolate_with_sam(self, idsLISTX, only_edited=False): """ Summary: This function is called when the user chooses the "Interpolate with SAM". It interpolates and inhance the objects with the given ids using SAM. Args: idsLISTX (list): The list of ids of the objects. 
""" self.waitWindow( visible=True, text=f'Please Wait.\nIDs are being interpolated with SAM...') if self.sam_model_comboBox.currentText() == "Select Model (SAM disabled)": MsgBox.OKmsgBox("SAM is disabled", f"SAM is disabled.\nPlease enable SAM.") return idsLIST = [] first_frame_idxLIST = [] last_frame_idxLIST = [] for id in idsLISTX: try: if only_edited: [minf, maxf] = [min( self.key_frames['id_' + str(id)]), max(self.key_frames['id_' + str(id)])] else: [minf, maxf] = [min( self.id_frames_rec['id_' + str(id)]), max(self.id_frames_rec['id_' + str(id)])] except: continue if minf == maxf: continue first_frame_idxLIST.append(minf) last_frame_idxLIST.append(maxf) idsLIST.append(id) if len(idsLIST) == 0: return overwrite = self.featuresOptions['interpolationOverwrite'] listObj = self.load_objects_from_json__orjson() listObjNEW = copy.deepcopy(listObj) recordsLIST = [[None for ii in range( first_frame_idxLIST[i], last_frame_idxLIST[i] + 1)] for i in range(len(idsLIST))] for i in range(min(first_frame_idxLIST) - 1, max(last_frame_idxLIST), 1): self.waitWindow(visible=True) listobjframe = listObj[i]['frame_idx'] frameobjects = listObj[i]['frame_data'].copy() for object_ in frameobjects: if (object_['tracker_id'] in idsLIST): index = idsLIST.index(object_['tracker_id']) recordsLIST[index][listobjframe - first_frame_idxLIST[index]] = copy.deepcopy(object_) listObj[i]['frame_data'].remove(object_) for frameIDX in range(min(first_frame_idxLIST), max(last_frame_idxLIST) + 1): QtWidgets.QApplication.processEvents() if self.interrupted: self.interrupted = False break self.waitWindow( visible=True, text=f'Please Wait.\nIDs are being interpolated with SAM...\nFrame {frameIDX}') frameIMAGE = self.get_frame_by_idx(frameIDX) for ididx in range(len(idsLIST)): i = frameIDX - first_frame_idxLIST[ididx] self.waitWindow(visible=True) if frameIDX < first_frame_idxLIST[ididx] or frameIDX > last_frame_idxLIST[ididx]: continue records = recordsLIST[ididx] if (records[i] != None): current = copy.deepcopy(records[i]) cur_bbox = current['bbox'] if not overwrite: listObj[frameIDX - 1]['frame_data'].append(current) continue else: prev_idx = i - 1 current = copy.deepcopy(records[i - 1]) next_idx = i + 1 for j in range(i + 1, len(records)): self.waitWindow(visible=True) if (records[j] != None): next_idx = j break cur_bbox = ((next_idx - i) / (next_idx - prev_idx)) * np.array(records[prev_idx]['bbox']) + ( (i - prev_idx) / (next_idx - prev_idx)) * np.array(records[next_idx]['bbox']) cur_bbox = [int(cur_bbox[i]) for i in range(len(cur_bbox))] current['bbox'] = copy.deepcopy(cur_bbox) records[i] = current try: same_image = self.sam_predictor.check_image( frameIMAGE) except: return cur_bbox, cur_segment = self.sam_enhanced_bbox_segment( frameIMAGE, cur_bbox, 1.2, max_itr=5, forSHAPE=False) current['bbox'] = copy.deepcopy(cur_bbox) current['segment'] = copy.deepcopy(cur_segment) # append the shape frame by frame (cause we already removed it in the prev. 
for loop) listObj[frameIDX - 1]['frame_data'].append(current) self.rec_frame_for_id(idsLIST[ididx], frameIDX) # update frame by frame to the to-be-uploaded listObj listObjNEW[frameIDX - 1] = copy.deepcopy(listObj[frameIDX - 1]) self.load_objects_to_json__orjson(listObjNEW) self.calculate_trajectories( range(min(first_frame_idxLIST) - 1, max(last_frame_idxLIST), 1)) self.main_video_frames_slider_changed() # Notify the user that the interpolation is finished self._config = get_config() if not self._config["mute"]: if not self.isActiveWindow(): notification.PopUp("SAM Interpolation Completed") def get_frame_by_idx(self, frameIDX): self.CAP.set(cv2.CAP_PROP_POS_FRAMES, frameIDX - 1) success, img = self.CAP.read() return img def scaleMENU(self): """ Summary: This function is called when the user presses the "Scale" button. It scales the selected shape. """ if len(self.canvas.selectedShapes) != 1: MsgBox.OKmsgBox(f'Scale error', f'There is {len(self.canvas.selectedShapes)} selected shapes. Please select only one shape to scale.') return result = scaleObject_UI.PopUp(self) if result == QtWidgets.QDialog.DialogCode.Accepted: self.update_current_frame_annotation_button_clicked() return else: self.main_video_frames_slider_changed() return def ctrlCopy(self): """ Summary: This function is called when the user presses the "Copy" button. It copies the selected shape(s). """ if len(self.canvas.selectedShapes) == 0: return self.copiedShapes = copy.deepcopy(self.canvas.selectedShapes) def ctrlPaste(self): """ Summary: This function is called when the user presses the "Paste" button. It pastes the copied shape(s). """ if len(self.copiedShapes) == 0: return ids = [shape.group_id for shape in self.canvas.shapes] flag = False for shape in self.copiedShapes: if shape.group_id in ids: flag = True continue self.canvas.shapes.append(shape) self.addLabel(shape) self.rec_frame_for_id(shape.group_id, self.INDEX_OF_CURRENT_FRAME) if flag: MsgBox.OKmsgBox("IDs already exist", "A Shape(s) with the same ID(s) already exist(s) in this frame.\n\nShapes with no duplicate IDs are Copied Successfully.") if self.current_annotation_mode == "video": self.update_current_frame_annotation_button_clicked() def fileSearchChanged(self): self.importDirImages( self.lastOpenDir, pattern=self.fileSearch.text(), load=False, ) def fileSelectionChanged(self): items = self.fileListWidget.selectedItems() if not items: return item = items[0] if not self.mayContinue(): return currIndex = self.imageList.index(str(item.text())) if currIndex < len(self.imageList): filename = self.imageList[currIndex] if filename: self.loadFile(filename) self.refresh_image_MODE() # React to canvas signals. 
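# Illustrative standalone sketch (not part of the app) of the OpenCV
# seek-then-read pattern that get_frame_by_idx above relies on; "video.mp4"
# is a placeholder path:
#
#     import cv2
#     cap = cv2.VideoCapture("video.mp4")
#     cap.set(cv2.CAP_PROP_POS_FRAMES, 42 - 1)  # CAP_PROP_POS_FRAMES is 0-based
#     success, frame = cap.read()  # reads frame 42 in the app's 1-based indexing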
def shapeSelectionChanged(self, selected_shapes): try: self._noSelectionSlot = True for shape in self.canvas.selectedShapes: shape.selected = False self.labelList.clearSelection() self.canvas.selectedShapes = selected_shapes for shape in self.canvas.selectedShapes: shape.selected = True item = self.labelList.findItemByShape(shape) self.labelList.selectItem(item) self.labelList.scrollToItem(item) self._noSelectionSlot = False n_selected = len(selected_shapes) self.actions.delete.setEnabled(n_selected > 0) self.actions.copy.setEnabled(n_selected > 0) self.actions.edit.setEnabled(n_selected == 1) except Exception: pass def addLabel(self, shape): if shape.group_id is None or self.current_annotation_mode != "video": text = shape.label else: text = f' ID {shape.group_id}: {shape.label}' label_list_item = LabelListWidgetItem(text, shape) self.labelList.addItem(label_list_item) if not self.uniqLabelList.findItemsByLabel(shape.label): item = self.uniqLabelList.createItemFromLabel(shape.label) self.uniqLabelList.addItem(item) rgb = self._get_rgb_by_label(shape.label) self.uniqLabelList.setItemLabel(item, shape.label, rgb) self.labelDialog.addLabelHistory(shape.label) for action in self.actions.onShapesPresent: action.setEnabled(True) rgb = self._get_rgb_by_label(shape.label) r, g, b = rgb label_list_item.setText( '{} <font color="#{:02x}{:02x}{:02x}">●</font>'.format( text, r, g, b ) ) shape.line_color = QtGui.QColor(r, g, b) shape.vertex_fill_color = QtGui.QColor(r, g, b) shape.hvertex_fill_color = QtGui.QColor(255, 255, 255) shape.fill_color = QtGui.QColor(r, g, b, 128) shape.select_line_color = QtGui.QColor(255, 255, 255) shape.select_fill_color = QtGui.QColor(r, g, b, 155) def _get_rgb_by_label(self, label): if self._config["shape_color"] == "auto": label_ascii = sum([ord(c) for c in label]) idx = label_ascii % len(color_palette) color = color_palette[idx] # convert color from bgr to rgb return color[::-1] elif ( self._config["shape_color"] == "manual" and self._config["label_colors"] and label in self._config["label_colors"] ): return self._config["label_colors"][label] elif self._config["default_shape_color"]: return self._config["default_shape_color"] def remLabels(self, shapes): for shape in shapes: item = self.labelList.findItemByShape(shape) self.labelList.removeItem(item) def loadShapes(self, shapes, replace=True): self._noSelectionSlot = True # sort shapes by group_id but only if it's not None shapes = sorted(shapes, key=lambda x: int(x.group_id) if x.group_id is not None else 0) for shape in shapes: self.addLabel(shape) self.labelList.clearSelection() self._noSelectionSlot = False self.canvas.loadShapes(shapes, replace=replace) for shape in self.canvas.shapes: self.canvas.setShapeVisible( shape, self.CURRENT_ANNOATAION_FLAGS["polygons"]) def loadLabels(self, shapes, replace=True): s = [] for shape in shapes: label = shape["label"] points = shape["points"] bbox = shape["bbox"] shape_type = shape["shape_type"] content = shape["content"] group_id = shape["group_id"] if not points: # skip point-empty shape continue shape = Shape( label=label, shape_type=shape_type, group_id=group_id, content=content, ) for i in range(0, len(points), 2): shape.addPoint(QtCore.QPointF(points[i], points[i + 1])) shape.close() default_flags = {} if self._config["label_flags"]: for pattern, keys in self._config["label_flags"].items(): if re.match(pattern, label): for key in keys: default_flags[key] = False shape.flags = default_flags s.append(shape) self.loadShapes(s, replace=replace) def loadFlags(self, flags): self.flag_widget.clear() for key, flag in 
flags.items(): item = QtWidgets.QListWidgetItem(key) item.setFlags(item.flags() | Qt.ItemIsUserCheckable) item.setCheckState(Qt.CheckState.Checked if flag else Qt.CheckState.Unchecked) self.flag_widget.addItem(item) def saveLabels(self, filename): lf = LabelFile() def format_shape(s): data = s.other_data.copy() data.update( dict( label=s.label.encode("utf-8") if PY2 else s.label, # convert points into 1D array points=mathOps.flattener(s.points), bbox=s.bbox, group_id=s.group_id, content=s.content, shape_type=s.shape_type, flags=s.flags, ) ) return data shapes = [format_shape(item.shape()) for item in self.labelList] flags = {} for i in range(self.flag_widget.count()): item = self.flag_widget.item(i) key = item.text() flag = item.checkState() == Qt.CheckState.Checked flags[key] = flag try: imagePath = osp.relpath(self.imagePath, osp.dirname(filename)) imageData = self.imageData if self._config["store_data"] else None if osp.dirname(filename) and not osp.exists(osp.dirname(filename)): os.makedirs(osp.dirname(filename)) lf.save( filename=filename, shapes=shapes, imagePath=imagePath, imageData=imageData, imageHeight=self.image.height(), imageWidth=self.image.width(), otherData=self.otherData, flags=flags, ) self.labelFile = lf items = self.fileListWidget.findItems( self.imagePath, Qt.MatchFlag.MatchExactly ) if len(items) > 0: if len(items) != 1: raise RuntimeError("There are duplicate files.") items[0].setCheckState(Qt.CheckState.Checked) # disable allows next and previous image to proceed return True except LabelFileError as e: self.errorMessage( self.tr("Error saving label data"), self.tr("%s") % e ) return False def copySelectedShape(self): added_shapes = self.canvas.copySelectedShapes() self.labelList.clearSelection() for shape in added_shapes: self.addLabel(shape) self.setDirty() def labelSelectionChanged(self): if self._noSelectionSlot: return if self.canvas.editing(): selected_shapes = [] for item in self.labelList.selectedItems(): selected_shapes.append(item.shape()) if selected_shapes: self.canvas.selectShapes(selected_shapes) else: self.canvas.deSelectShape() def labelItemChanged(self, item): shape = item.shape() self.canvas.setShapeVisible(shape, item.checkState() == Qt.CheckState.Checked) def labelOrderChanged(self): self.setDirty() self.canvas.loadShapes([item.shape() for item in self.labelList]) # Callback functions: def newShape(self): """Pop-up and give focus to the label editor. position MUST be in global coordinates. 
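Example (illustrative, mirroring the ID bookkeeping below): with self.minID == -2, confirming a shape without an ID assigns group_id = -2 and decrements minID to -3; confirming a shape with a user-entered ID of 7 leaves minID at min(-2, 7 - 1) == -2, so auto-assigned IDs stay below every ID already in use. 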
""" items = self.uniqLabelList.selectedItems() text = None if items: text = items[0].data(Qt.ItemDataRole.UserRole) flags = {} group_id = None if self._config["display_label_popup"] or not text: previous_text = self.labelDialog.edit.text() text, flags, group_id, content = self.labelDialog.popUp(text) if not text: self.labelDialog.edit.setText(previous_text) if text and not self.validateLabel(text): self.errorMessage( self.tr("Invalid label"), self.tr("Invalid label '{}' with validation type '{}'").format( text, self._config["validate_label"] ), ) text = "" if text == "SAM instance": text = "SAM instance - confirmed" if self.current_annotation_mode == "video": group_id, text = getIDfromUser_UI.PopUp(self, group_id, text) if text: if group_id is None: group_id = self.minID self.minID -= 1 else: self.minID = min(self.minID, group_id - 1) if self.canvas.SAM_mode == "finished": self.current_sam_shape["label"] = text self.current_sam_shape["group_id"] = group_id else: self.labelList.clearSelection() # shape below is of type qt shape shape = self.canvas.setLastLabel(text, flags) shape.group_id = group_id shape.content = content self.addLabel(shape) self.rec_frame_for_id(group_id, self.INDEX_OF_CURRENT_FRAME) self.actions.editMode.setEnabled(True) self.actions.undoLastPoint.setEnabled(False) self.actions.undo.setEnabled(True) self.setDirty() self.refresh_image_MODE() else: if self.canvas.SAM_mode == "finished": self.current_sam_shape["label"] = text self.current_sam_shape["group_id"] = -1 self.canvas.SAM_mode = "" else: self.canvas.undoLastLine() self.canvas.shapesBackups.pop() if self.current_annotation_mode == "video": self.update_current_frame_annotation_button_clicked() self.update_current_frame_annotation_button_clicked() def scrollRequest(self, delta, orientation): units = -delta * 0.1 # natural scroll bar = self.scrollBars[orientation] value = bar.value() + bar.singleStep() * units self.setScroll(orientation, value) def setScroll(self, orientation, value): self.scrollBars[orientation].setValue(value) self.scroll_values[orientation][self.filename] = value def setZoom(self, value): self.actions.fitWidth.setChecked(False) self.actions.fitWindow.setChecked(False) self.zoomMode = self.MANUAL_ZOOM self.zoomWidget.setValue(value) self.zoom_values[self.filename] = (self.zoomMode, value) def addZoom(self, increment=1.1): zoom_value = self.zoomWidget.value() * increment if increment > 1: zoom_value = math.ceil(zoom_value) else: zoom_value = math.floor(zoom_value) self.setZoom(zoom_value) def zoomRequest(self, delta, pos): canvas_width_old = self.canvas.width() units = 1.1 if delta < 0: units = 0.9 self.addZoom(units) canvas_width_new = self.canvas.width() if canvas_width_old != canvas_width_new: canvas_scale_factor = canvas_width_new / canvas_width_old x_shift = round(pos.x() * canvas_scale_factor) - pos.x() y_shift = round(pos.y() * canvas_scale_factor) - pos.y() self.setScroll( Qt.Orientation.Horizontal, self.scrollBars[Qt.Orientation.Horizontal].value() + x_shift, ) self.setScroll( Qt.Orientation.Vertical, self.scrollBars[Qt.Orientation.Vertical].value() + y_shift, ) def setFitWindow(self, value=True): if value: self.actions.fitWidth.setChecked(False) self.zoomMode = self.FIT_WINDOW if value else self.MANUAL_ZOOM self.adjustScale() def setFitWidth(self, value=True): if value: self.actions.fitWindow.setChecked(False) self.zoomMode = self.FIT_WIDTH if value else self.MANUAL_ZOOM self.adjustScale() def onNewBrightnessContrast(self, qimage): self.canvas.loadPixmap( QtGui.QPixmap.fromImage(qimage), 
clear_shapes=False ) def enable_show_cross_line(self, enabled): self._config["show_cross_line"] = enabled self.actions.show_cross_line.setChecked(enabled) self.canvas.set_show_cross_line(enabled) def brightnessContrast(self, value): dialog = BrightnessContrastDialog( utils.img_data_to_pil(self.imageData), self.onNewBrightnessContrast, parent=self, ) brightness, contrast = self.brightnessContrast_values.get( self.filename, (None, None) ) if brightness is not None: dialog.slider_brightness.setValue(brightness) if contrast is not None: dialog.slider_contrast.setValue(contrast) dialog.exec() brightness = dialog.slider_brightness.value() contrast = dialog.slider_contrast.value() self.brightnessContrast_values[self.filename] = (brightness, contrast) def togglePolygons(self, value): for item in self.labelList: item.setCheckState(Qt.CheckState.Checked if value else Qt.CheckState.Unchecked) def loadFile(self, filename=None): """Load the specified file, or the last opened file if None.""" # changing fileListWidget loads file if filename in self.imageList and ( self.fileListWidget.currentRow() != self.imageList.index(filename) ): self.fileListWidget.setCurrentRow(self.imageList.index(filename)) self.fileListWidget.repaint() return self.resetState() self.canvas.setEnabled(False) if filename is None: filename = self.settings.value("filename", "") filename = str(filename) if not QtCore.QFile.exists(filename): print(f"File {filename} does not exist") self.errorMessage( self.tr("Error opening file"), self.tr("No such file: %s") % filename, ) return False # assumes same name, but json extension self.status(self.tr("Loading %s...") % osp.basename(str(filename))) label_file = osp.splitext(filename)[0] + ".json" if self.output_dir: label_file_without_path = osp.basename(label_file) label_file = osp.join(self.output_dir, label_file_without_path) if QtCore.QFile.exists(label_file) and LabelFile.is_label_file( label_file ): try: self.labelFile = LabelFile(label_file) except LabelFileError as e: self.errorMessage( self.tr("Error opening file"), self.tr( "
%s\n" "
Make sure %s is a valid label file." ) % (e, label_file), ) self.status(self.tr("Error reading %s") % label_file) return False self.imageData = self.labelFile.imageData self.imagePath = osp.join( osp.dirname(label_file), self.labelFile.imagePath, ) self.otherData = self.labelFile.otherData else: self.imageData = LabelFile.load_image_file(filename) if self.imageData: self.imagePath = filename self.labelFile = None image = QtGui.QImage.fromData(self.imageData) if image.isNull(): formats = [ "*.{}".format(fmt.data().decode()) for fmt in QtGui.QImageReader.supportedImageFormats() ] self.errorMessage( self.tr("Error opening file"), self.tr( "
Make sure {0} is a valid image file.\n" "Supported image formats: {1}
" ).format(filename, ",".join(formats)), ) self.status(self.tr("Error reading %s") % filename) return False self.image = image self.CURRENT_FRAME_IMAGE = cv2.imread(filename) self.filename = filename if self._config["keep_prev"]: prev_shapes = self.canvas.shapes self.canvas.loadPixmap(QtGui.QPixmap.fromImage(image)) flags = {k: False for k in self._config["flags"] or []} if self.labelFile: self.actions.export.setEnabled(True) self.CURRENT_SHAPES_IN_IMG = self.labelFile.shapes self.canvas.loadPixmap(QtGui.QPixmap.fromImage(image)) self.loadLabels(self.labelFile.shapes) if self.labelFile.flags is not None: flags.update(self.labelFile.flags) self.loadFlags(flags) if self._config["keep_prev"] and self.noShapes(): self.loadShapes(prev_shapes, replace=False) self.setDirty() else: self.setClean() self.canvas.setEnabled(True) # set zoom values is_initial_load = not self.zoom_values if self.filename in self.zoom_values: self.zoomMode = self.zoom_values[self.filename][0] self.setZoom(self.zoom_values[self.filename][1]) elif is_initial_load or not self._config["keep_prev_scale"]: self.adjustScale(initial=True) # set scroll values for orientation in self.scroll_values: if self.filename in self.scroll_values[orientation]: self.setScroll( orientation, self.scroll_values[orientation][self.filename] ) # after loading the image, clear SAM instance if exists if self.sam_predictor is not None: self.sam_predictor.clear_logit() self.canvas.SAM_coordinates = [] # set brightness constrast values dialog = BrightnessContrastDialog( utils.img_data_to_pil(self.imageData), self.onNewBrightnessContrast, parent=self, ) brightness, contrast = self.brightnessContrast_values.get( self.filename, (None, None) ) if self._config["keep_prev_brightness"] and self.recentFiles: brightness, _ = self.brightnessContrast_values.get( self.recentFiles[0], (None, None) ) if self._config["keep_prev_contrast"] and self.recentFiles: _, contrast = self.brightnessContrast_values.get( self.recentFiles[0], (None, None) ) if brightness is not None: dialog.slider_brightness.setValue(brightness) if contrast is not None: dialog.slider_contrast.setValue(contrast) self.brightnessContrast_values[self.filename] = (brightness, contrast) if brightness is not None or contrast is not None: dialog.onNewValue(None) self.paintCanvas() self.addRecentFile(self.filename) self.toggleActions(True) self.canvas.setFocus() self.status(self.tr("Loaded %s") % osp.basename(str(filename))) return True def resizeEvent(self, event): if ( self.canvas and not self.image.isNull() and self.zoomMode != self.MANUAL_ZOOM ): self.adjustScale() super(MainWindow, self).resizeEvent(event) def paintCanvas(self): assert not self.image.isNull(), "cannot paint null image" self.canvas.scale = 0.01 * self.zoomWidget.value() self.canvas.adjustSize() self.canvas.update() def adjustScale(self, initial=False): value = self.scalers[self.FIT_WINDOW if initial else self.zoomMode]() value = int(100 * value) self.zoomWidget.setValue(value) self.zoom_values[self.filename] = (self.zoomMode, value) def scaleFitWindow(self): """Figure out the size of the pixmap to fit the main widget.""" e = 2.0 # So that no scrollbars are generated. w1 = self.centralWidget().width() - e h1 = self.centralWidget().height() - e a1 = w1 / h1 # Calculate a new scale value based on the pixmap's aspect ratio. w2 = self.canvas.pixmap.width() - 0.0 h2 = self.canvas.pixmap.height() - 0.0 a2 = w2 / h2 return w1 / w2 if a2 >= a1 else h1 / h2 def scaleFitWidth(self): # The epsilon does not seem to work too well here. 
w = self.centralWidget().width() - 2.0 return w / self.canvas.pixmap.width() def enableSaveImageWithData(self, enabled): self._config["store_data"] = enabled self.actions.saveWithImageData.setChecked(enabled) def closeEvent(self, event): if not self.mayContinue(): event.ignore() else: self.Escape_clicked() self.settings.setValue( "filename", self.filename if self.filename else "" ) self.settings.setValue("window/size", self.size()) self.settings.setValue("window/position", self.pos()) self.settings.setValue("window/state", self.saveState()) self.settings.setValue("recentFiles", self.recentFiles) # ask the user where to save the labels # self.settings.setValue('window/geometry', self.saveGeometry()) def dragEnterEvent(self, event): extensions = [ ".%s" % fmt.data().decode().lower() for fmt in QtGui.QImageReader.supportedImageFormats() ] if event.mimeData().hasUrls(): items = [i.toLocalFile() for i in event.mimeData().urls()] if any([i.lower().endswith(tuple(extensions)) for i in items]): event.accept() else: event.ignore() def dropEvent(self, event): if not self.mayContinue(): event.ignore() return items = [i.toLocalFile() for i in event.mimeData().urls()] self.importDroppedImageFiles(items) # User Dialogs # def loadRecent(self, filename): if self.mayContinue(): self.loadFile(filename) def change_curr_model(self, model_name): """ Summary: Change current model to the model_name Args: model_name (str): name of the model to be changed to """ self.multi_model_flag = False self.waitWindow( visible=True, text=f'Please Wait.\n{model_name} is being Loaded...') self.intelligenceHelper.current_model_name, self.intelligenceHelper.current_mm_model = self.intelligenceHelper.make_mm_model( model_name) self.waitWindow() def model_explorer(self): """ Summary: Open model explorer dialog to select or download models """ self._config = get_config() model_explorer_dialog = utils.ModelExplorerDialog( self, self._config["mute"], notification.PopUp) # make it fit its contents model_explorer_dialog.adjustSize() model_explorer_dialog.setMinimumWidth( int(model_explorer_dialog.table.width() * 1.5)) model_explorer_dialog.setMinimumHeight( model_explorer_dialog.table.rowHeight(0) * 10) model_explorer_dialog.exec() # init intelligence again if it's the first model if self.helper_first_time_flag: try: self.intelligenceHelper = Intelligence(self) except: print( "It seems there is a problem initializing the model.\nCheck that you have at least one model.") self.helper_first_time_flag = True else: self.helper_first_time_flag = False mathOps.update_saved_models_json(os.getcwd()) selected_model_name, config, checkpoint = model_explorer_dialog.selected_model if selected_model_name != -1: self.intelligenceHelper.current_model_name, self.intelligenceHelper.current_mm_model = self.intelligenceHelper.make_mm_model_more( selected_model_name, config, checkpoint) self.updateSamControls() def openNextImg(self, _value=False, load=True): self.refresh_image_MODE() keep_prev = self._config["keep_prev"] if not self.mayContinue(): return if len(self.imageList) <= 0: return filename = None if self.filename is None: filename = self.imageList[0] else: currIndex = self.imageList.index(self.filename) if currIndex + 1 < len(self.imageList): filename = self.imageList[currIndex + 1] else: filename = self.imageList[-1] self.filename = filename if self.filename and load: self.loadFile(self.filename) self._config["keep_prev"] = keep_prev self.refresh_image_MODE() def openFile(self, _value=False): self.actions.export.setEnabled(False) try: 
cv2.destroyWindow('video processing') except: pass if not self.mayContinue(): return path = osp.dirname(str(self.filename)) if self.filename else "." formats = [ "*.{}".format(fmt.data().decode()) for fmt in QtGui.QImageReader.supportedImageFormats() ] filters = self.tr("Image & Label files (%s)") % " ".join( formats + ["*%s" % LabelFile.suffix] ) filename = QtWidgets.QFileDialog.getOpenFileName( self, self.tr("%s - Choose Image or Label file") % __appname__, path, filters, ) filename, _ = filename filename = str(filename) if filename: self.reset_for_new_mode("img") self.loadFile(filename) self.refresh_image_MODE() self.set_video_controls_visibility(False) self.filename = filename # clear the file list widget self.fileListWidget.clear() self.uniqLabelList.clear() # enable Visualization Options for option in self.vis_options: if option in [self.id_checkBox, self.traj_checkBox, self.trajectory_length_lineEdit]: option.setEnabled(False) else: option.setEnabled(True) def changeOutputDirDialog(self, _value=False): default_output_dir = self.output_dir if default_output_dir is None and self.filename: default_output_dir = osp.dirname(self.filename) if default_output_dir is None: default_output_dir = self.currentPath() output_dir = QtWidgets.QFileDialog.getExistingDirectory( self, self.tr("%s - Save/Load Annotations in Directory") % __appname__, default_output_dir, QtWidgets.QFileDialog.Option.ShowDirsOnly | QtWidgets.QFileDialog.Option.DontResolveSymlinks, ) output_dir = str(output_dir) if not output_dir: return self.output_dir = output_dir self.statusBar().showMessage( self.tr("%s . Annotations will be saved/loaded in %s") % ("Change Annotations Dir", self.output_dir) ) self.statusBar().show() current_filename = self.filename self.importDirImages(self.lastOpenDir, load=False) if current_filename in self.imageList: # retain currently selected file self.fileListWidget.setCurrentRow( self.imageList.index(current_filename) ) self.fileListWidget.repaint() def saveFile(self, _value=False): assert not self.image.isNull(), "cannot save empty image" if self.labelFile: # DL20180323 - overwrite when in directory self.save_path = self.labelFile.filename self._saveFile(self.save_path) elif self.output_file: self.save_path = self.output_file self._saveFile(self.save_path) self.close() else: self.save_path = self.saveFileDialog() self._saveFile(self.save_path) if self.save_path is not None and self.save_path != "": self.actions.export.setEnabled(True) def exportData(self): """ Export data to COCO, MOT, video, and custom exports, depending on the current annotation mode. If the current annotation mode is "video", the function prompts the user to select which types of exports to perform (COCO, MOT, video, and/or custom exports), and then prompts the user to select the output file path for each export type that was selected. The function then exports the data to the selected file paths. If the current annotation mode is "img" or "dir", the function prompts the user to select the output file path for a COCO export, and then exports the data to the selected file path. If an error occurs during the export process, the function displays an error message. Otherwise, the function displays a success message. 
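Example (hypothetical paths): in video mode, annotations for "/data/run.mp4" are read from "/data/run_tracking_results.json" (the {CURRENT_VIDEO_PATH}/{CURRENT_VIDEO_NAME}_tracking_results.json pattern used below), and the default output names offered are "coco.json", "mot.txt", "tracking_results.mp4", or a custom export's own file name and format. 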
""" try: if self.current_annotation_mode == "video": # Get user input for export options result, coco_radio, mot_radio, video_radio, custom_exports_radio_checked_list = exportData_UI.PopUp() if not result: return json_file_name = f'{self.CURRENT_VIDEO_PATH}/{self.CURRENT_VIDEO_NAME}_tracking_results.json' pth = "" # Check which radio button is checked and export accordingly if video_radio: # Get user input for video export path folderDialog = utils.FolderDialog( "tracking_results.mp4", "mp4") if folderDialog.exec(): pth = self.export_as_video_button_clicked( folderDialog.selectedFiles()[0]) else: return if coco_radio: # Get user input for COCO export path folderDialog = utils.FolderDialog("coco.json", "json") if folderDialog.exec(): pth = utils.exportCOCOvid( json_file_name, self.CURRENT_VIDEO_WIDTH, self.CURRENT_VIDEO_HEIGHT, folderDialog.selectedFiles()[0]) else: return if mot_radio: # Get user input for MOT export path folderDialog = utils.FolderDialog("mot.txt", "txt") if folderDialog.exec(): pth = utils.exportMOT( json_file_name, folderDialog.selectedFiles()[0]) else: return # custom exports custom_exports_list_video = [ custom_export for custom_export in custom_exports_list if custom_export.mode == "video"] if len(custom_exports_radio_checked_list) != 0: for i in range(len(custom_exports_radio_checked_list)): if custom_exports_radio_checked_list[i]: # Get user input for custom export path folderDialog = utils.FolderDialog( f"{custom_exports_list_video[i].file_name}.{custom_exports_list_video[i].format}", custom_exports_list_video[i].format) if folderDialog.exec(): try: pth = custom_exports_list_video[i]( json_file_name, self.CURRENT_VIDEO_WIDTH, self.CURRENT_VIDEO_HEIGHT, folderDialog.selectedFiles()[0]) except Exception as e: MsgBox.OKmsgBox( f"Error", f"Error: with custom export {custom_exports_list_video[i].button_name}\n check the parameters matches the specified ones in custom_exports.py\n Error Message: {e}", "critical") else: return # Image and Directory modes elif self.current_annotation_mode == "img" or self.current_annotation_mode == "dir": result, coco_radio, custom_exports_radio_checked_list = exportData_UI.PopUp( mode="image") if not result: return save_path = self.save_path if self.save_path else self.labelFile.filename json_paths = utils.parse_img_export(self.target_directory, save_path) # Check which radio button is checked and export accordingly # COCO export if coco_radio: # Get user input for COCO export path folderDialog = utils.FolderDialog("coco.json", "json") if folderDialog.exec(): pth = utils.exportCOCO( json_paths, folderDialog.selectedFiles()[0]) else: return # custom exports custom_exports_list_image = [ custom_export for custom_export in custom_exports_list if custom_export.mode == "image"] if len(custom_exports_radio_checked_list) != 0: for i in range(len(custom_exports_radio_checked_list)): if custom_exports_radio_checked_list[i]: # Get user input for custom export path folderDialog = utils.FolderDialog( f"{custom_exports_list_image[i].file_name}.{custom_exports_list_image[i].format}", custom_exports_list_image[i].format) if folderDialog.exec(): try: pth = custom_exports_list_image[i]( json_paths, folderDialog.selectedFiles()[0]) except Exception as e: MsgBox.OKmsgBox( f"Error", f"Error: with custom export {custom_exports_list_image[i].button_name}\n check the parameters matches the specified ones in custom_exports.py\n Error Message: {e}", "critical") else: return except Exception as e: # Error QMessageBox msg = QtWidgets.QMessageBox() 
msg.setIcon(QtWidgets.QMessageBox.Icon.Critical) msg.setText(f"Error\n {e}") msg.setWindowTitle( "Export Error") # print exception and error line to terminal print(e) msg.setStandardButtons(QtWidgets.QMessageBox.StandardButton.Ok) msg.exec() return else: # display QMessageBox with ok button and label "Exporting COCO" msg = QtWidgets.QMessageBox() try: if pth not in ["", None, False]: msg.setIcon(QtWidgets.QMessageBox.Icon.Information) msg.setText(f"Annotations exported successfully to {pth}") msg.setWindowTitle("Export Success") else: msg.setIcon(QtWidgets.QMessageBox.Icon.Critical) msg.setText(f"Export Failed") msg.setWindowTitle("Export Failed") except: msg.setIcon(QtWidgets.QMessageBox.Icon.Critical) msg.setText(f"Export Failed") msg.setWindowTitle("Export Failed") msg.setStandardButtons(QtWidgets.QMessageBox.StandardButton.Ok) msg.exec() def saveFileAs(self, _value=False): self.actions.export.setEnabled(True) assert not self.image.isNull(), "cannot save empty image" self.save_path = self.saveFileDialog() self._saveFile(self.save_path) def saveFileDialog(self): caption = self.tr("%s - Choose File") % __appname__ filters = self.tr("Label files (*%s)") % LabelFile.suffix if self.output_dir: dlg = QtWidgets.QFileDialog( self, caption, self.output_dir, filters ) else: dlg = QtWidgets.QFileDialog( self, caption, self.currentPath(), filters ) dlg.setDefaultSuffix(LabelFile.suffix[1:]) dlg.setAcceptMode(QtWidgets.QFileDialog.AcceptMode.AcceptSave) dlg.setOption(QtWidgets.QFileDialog.Option.DontConfirmOverwrite, False) dlg.setOption(QtWidgets.QFileDialog.Option.DontUseNativeDialog, False) basename = osp.basename(osp.splitext(self.filename)[0]) if self.output_dir: default_labelfile_name = osp.join( self.output_dir, basename + LabelFile.suffix ) else: default_labelfile_name = osp.join( self.currentPath(), basename + LabelFile.suffix ) filename = dlg.getSaveFileName( self, self.tr("Choose File"), default_labelfile_name, self.tr("Label files (*%s)") % LabelFile.suffix, ) if isinstance(filename, tuple): filename, _ = filename return filename def _saveFile(self, filename): if filename and self.saveLabels(filename): self.addRecentFile(filename) self.setClean() def closeFile(self, _value=False): if not self.mayContinue(): return self.resetState() self.setClean() self.toggleActions(False) self.canvas.setEnabled(False) self.actions.saveAs.setEnabled(False) # clear the file list widget self.fileListWidget.clear() self.uniqLabelList.clear() self.current_annotation_mode = "" self.right_click_menu() for option in self.vis_options: option.setEnabled(False) def getLabelFile(self): if self.filename.lower().endswith(".json"): label_file = self.filename else: label_file = osp.splitext(self.filename)[0] + ".json" return label_file def deleteFile(self): mb = QtWidgets.QMessageBox msg = self.tr( "You are about to permanently delete this label file, " "proceed anyway?" ) answer = mb.warning(self, self.tr("Attention"), msg, mb.StandardButton.Yes | mb.StandardButton.No) if answer != mb.StandardButton.Yes: return label_file = self.getLabelFile() if osp.exists(label_file): os.remove(label_file) logger.info("Label file is removed: {}".format(label_file)) item = self.fileListWidget.currentItem() item.setCheckState(Qt.CheckState.Unchecked) self.resetState() # Message Dialogs. 
# def hasLabels(self): if self.noShapes(): self.errorMessage( "No objects labeled", "You must label at least one object to save the file.", ) return False return True def hasLabelFile(self): if self.filename is None: return False label_file = self.getLabelFile() return osp.exists(label_file) def mayContinue(self): if not self.dirty: return True mb = QtWidgets.QMessageBox msg = self.tr('Save annotations to "{}" before closing?').format( self.filename ) answer = mb.question( self, self.tr("Save annotations?"), msg, mb.StandardButton.Save | mb.StandardButton.Discard | mb.StandardButton.Cancel, mb.StandardButton.Save, ) if answer == mb.StandardButton.Discard: return True elif answer == mb.StandardButton.Save: self.saveFile() return True else: # answer == mb.Cancel return False def errorMessage(self, title, message): msg_box = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Icon.Critical, title, message) msg_box.setStandardButtons(QtWidgets.QMessageBox.StandardButton.Ok) return msg_box def currentPath(self): return osp.dirname(str(self.filename)) if self.filename else "." def toggleKeepPrevMode(self): self._config["keep_prev"] = not self._config["keep_prev"] def removeSelectedPoint(self): self.canvas.removeSelectedPoint() if not self.canvas.hShape.points: self.canvas.deleteShape(self.canvas.hShape) self.remLabels([self.canvas.hShape]) self.setDirty() if self.noShapes(): for action in self.actions.onShapesPresent: action.setEnabled(False) def deleteSelectedShape(self): try: if len(self.canvas.selectedShapes) == 0: return yes, no = QtWidgets.QMessageBox.StandardButton.Yes, QtWidgets.QMessageBox.StandardButton.No msg = self.tr( "You are about to permanently delete {} polygons, " "proceed anyway?" ).format(len(self.canvas.selectedShapes)) if yes == QtWidgets.QMessageBox.warning( self, self.tr("Attention"), msg, yes | no, yes ): deleted_shapes = self.canvas.deleteSelected() deleted_ids = [shape.group_id for shape in deleted_shapes] self.remLabels(deleted_shapes) self.setDirty() if self.noShapes(): for action in self.actions.onShapesPresent: action.setEnabled(False) if self.current_annotation_mode == 'img' or self.current_annotation_mode == 'dir': self.refresh_image_MODE() return # if video mode result, self.featuresOptions, fromFrameVAL, toFrameVAL = deleteSelectedShape_UI.PopUp( self.TOTAL_VIDEO_FRAMES, self.INDEX_OF_CURRENT_FRAME, self.featuresOptions) if result == QtWidgets.QDialog.DialogCode.Accepted: for deleted_id in deleted_ids: self.delete_ids_from_all_frames( [deleted_id], from_frame=fromFrameVAL, to_frame=toFrameVAL) self.main_video_frames_slider_changed() except Exception as e: MsgBox.OKmsgBox("Error", f"Error: {e}", "critical") def delete_ids_from_all_frames(self, deleted_ids, from_frame, to_frame): """ Summary: Delete ids from a range of frames Args: deleted_ids (list): list of ids to be deleted from_frame (int): starting frame to_frame (int): ending frame """ from_frame, to_frame = np.min( [from_frame, to_frame]), np.max([from_frame, to_frame]) listObj = self.load_objects_from_json__orjson() for i in range(from_frame - 1, to_frame, 1): frame_idx = listObj[i]['frame_idx'] # iterate over a copy: removing items from the list being iterated would skip elements for object_ in listObj[i]['frame_data'].copy(): id = object_['tracker_id'] if id in deleted_ids: listObj[i]['frame_data'].remove(object_) self.CURRENT_ANNOATAION_TRAJECTORIES['id_' + str(id)][frame_idx - 1] = (-1, -1) self.rec_frame_for_id(id, frame_idx, type_='remove') self.load_objects_to_json__orjson(listObj) def copyShape(self): """ Summary: Copy selected shape in right click menu. The shape 
is NOT saved in the clipboard """ if len(self.canvas.selectedShapes) > 1 and self.current_annotation_mode == 'video': org = copy.deepcopy(self.canvas.shapes) self.canvas.endMove(copy=True) self.canvas.undoLastLine() self.canvas.shapesBackups.pop() self.canvas.shapes = org self.update_current_frame_annotation_button_clicked() return elif self.current_annotation_mode == 'video': self.canvas.endMove(copy=True) shape = self.canvas.selectedShapes[0] text = shape.label text, flags, group_id, content = self.labelDialog.popUp(text) shape.group_id = -1 shape.content = content shape.label = text shape.flags = flags group_id, text = getIDfromUser_UI.PopUp(self, group_id, text) if text: self.labelList.clearSelection() shape = self.canvas.setLastLabel(text, flags) shape.group_id = group_id self.addLabel(shape) self.rec_frame_for_id( shape.group_id, self.INDEX_OF_CURRENT_FRAME) self.actions.editMode.setEnabled(True) self.actions.undoLastPoint.setEnabled(False) self.actions.undo.setEnabled(True) self.setDirty() else: self.canvas.undoLastLine() self.canvas.shapesBackups.pop() self.update_current_frame_annotation_button_clicked() return self.canvas.endMove(copy=True) for shape in self.canvas.selectedShapes: self.addLabel(shape) self.labelList.clearSelection() self.setDirty() def moveShape(self): self.canvas.endMove(copy=False) self.setDirty() if self.current_annotation_mode == 'video': self.update_current_frame_annotation_button_clicked() def openDirDialog(self, _value=False, dirpath=None): if not self.mayContinue(): return defaultOpenDirPath = dirpath if dirpath else "." if self.lastOpenDir and osp.exists(self.lastOpenDir): defaultOpenDirPath = self.lastOpenDir else: defaultOpenDirPath = ( osp.dirname(self.filename) if self.filename else "." ) targetDirPath = str( QtWidgets.QFileDialog.getExistingDirectory( self, self.tr("%s - Open Directory") % __appname__, defaultOpenDirPath, QtWidgets.QFileDialog.Option.ShowDirsOnly | QtWidgets.QFileDialog.Option.DontResolveSymlinks, ) ) self.target_directory = targetDirPath self.importDirImages(targetDirPath) self.set_video_controls_visibility(False) # enable Visualization Options for option in self.vis_options: if option in [self.id_checkBox, self.traj_checkBox, self.trajectory_length_lineEdit]: option.setEnabled(False) else: option.setEnabled(True) @property def imageList(self): lst = [] for i in range(self.fileListWidget.count()): item = self.fileListWidget.item(i) lst.append(item.text()) return lst def importDroppedImageFiles(self, imageFiles): extensions = [ ".%s" % fmt.data().decode().lower() for fmt in QtGui.QImageReader.supportedImageFormats() ] self.filename = None for file in imageFiles: if file in self.imageList or not file.lower().endswith( tuple(extensions) ): continue label_file = osp.splitext(file)[0] + ".json" if self.output_dir: label_file_without_path = osp.basename(label_file) label_file = osp.join(self.output_dir, label_file_without_path) item = QtWidgets.QListWidgetItem(file) # item.setFlags(Qt.ItemFlag.ItemIsEnabled | Qt.ItemFlag.ItemIsSelectable) if QtCore.QFile.exists(label_file) and LabelFile.is_label_file( label_file ): item.setCheckState(Qt.CheckState.Checked) else: item.setCheckState(Qt.CheckState.Unchecked) self.fileListWidget.addItem(item) self.openNextImg() def importDirImages(self, dirpath, pattern=None, load=True): self.actions.export.setEnabled(True) if not self.mayContinue() or not dirpath: return self.reset_for_new_mode("dir") self.lastOpenDir = dirpath self.filename = None self.fileListWidget.clear() self.uniqLabelList.clear() for 
filename in self.scanAllImages(dirpath): if pattern and pattern not in filename: continue label_file = osp.splitext(filename)[0] + ".json" if self.output_dir: label_file_without_path = osp.basename(label_file) label_file = osp.join(self.output_dir, label_file_without_path) item = QtWidgets.QListWidgetItem(filename) # item.setFlags(Qt.ItemFlag.ItemIsEnabled | Qt.ItemFlag.ItemIsSelectable) if QtCore.QFile.exists(label_file) and LabelFile.is_label_file( label_file ): item.setCheckState(Qt.CheckState.Checked) else: item.setCheckState(Qt.CheckState.Unchecked) self.fileListWidget.addItem(item) self.openNextImg(load=load) self.fileListWidget.horizontalScrollBar().setValue( self.fileListWidget.horizontalScrollBar().maximum() ) def scanAllImages(self, folderPath): extensions = [ ".%s" % fmt.data().decode().lower() for fmt in QtGui.QImageReader.supportedImageFormats() ] images = [] for root, dirs, files in os.walk(folderPath): for file in files: if file.lower().endswith(tuple(extensions)): relativePath = osp.join(root, file) images.append(relativePath) images.sort(key=lambda x: x.lower()) return images def refresh_image_MODE(self, fromSignal=False): try: if self.current_annotation_mode == "video" and not fromSignal: return self.CURRENT_SHAPES_IN_IMG = mathOps.convert_qt_shapes_to_shapes(self.canvas.shapes) imageX = visualizations.draw_bb_on_image_MODE(self.CURRENT_ANNOATAION_FLAGS, self.image, self.CURRENT_SHAPES_IN_IMG) self.labelList.clear() self.canvas.loadPixmap(QtGui.QPixmap.fromImage(imageX)) self.loadLabels(self.CURRENT_SHAPES_IN_IMG) except: pass def annotate_one(self, called_from_tracking=False): areaFlag = len(self.canvas.tracking_area_polygon) > 2 if areaFlag: dims = self.CURRENT_FRAME_IMAGE.shape area_points = self.canvas.tracking_area_polygon [x1, y1, x2, y2] = mathOps.track_area_adjustedBboex( area_points, dims, ratio=0.1) targetImage = self.CURRENT_FRAME_IMAGE[y1: y2, x1: x2] else: targetImage = self.CURRENT_FRAME_IMAGE try: if self.current_annotation_mode != "video": if os.path.exists(self.filename): self.labelList.clearSelection() if self.multi_model_flag: shapes = self.intelligenceHelper.get_shapes_of_one( targetImage, img_array_flag=True, multi_model_flag=True) else: shapes = self.intelligenceHelper.get_shapes_of_one( targetImage, img_array_flag=True) if areaFlag: shapes = mathOps.adjust_shapes_to_original_image( shapes, x1, y1, area_points) if self.current_annotation_mode == "video" and called_from_tracking: return shapes except Exception as e: MsgBox.OKmsgBox("Error", f"{e}", "critical") return imageX = visualizations.draw_bb_on_image_MODE(self.CURRENT_ANNOATAION_FLAGS, self.image, shapes) # clear shapes already in labelList (fixes the bug of saving multiple shapes of the same object) self.labelList.clear() self.CURRENT_SHAPES_IN_IMG = shapes self.canvas.loadPixmap(QtGui.QPixmap.fromImage(imageX)) self.loadLabels(self.CURRENT_SHAPES_IN_IMG) self.actions.editMode.setEnabled(True) self.actions.undoLastPoint.setEnabled(False) self.actions.undo.setEnabled(True) self.setDirty() def annotate_batch(self): images = [] self._config = get_config() notif = [self._config["mute"], self, notification.PopUp] for filename in self.imageList: images.append(filename) if self.multi_model_flag: self.intelligenceHelper.get_shapes_of_batch( images, multi_model_flag=True, notif=notif) else: self.intelligenceHelper.get_shapes_of_batch(images, notif=notif) def setConfThreshold(self): # if a threshold exists, pass it as the previous value if self.intelligenceHelper.conf_threshold: 
self.intelligenceHelper.conf_threshold = self.segmentation_options_UI.setConfThreshold( self.intelligenceHelper.conf_threshold) # if not, use the default value in the function as the previous value else: self.intelligenceHelper.conf_threshold = self.segmentation_options_UI.setConfThreshold() def setIOUThreshold(self): # if a threshold exists, pass it as the previous value if self.intelligenceHelper.iou_threshold: self.intelligenceHelper.iou_threshold = self.segmentation_options_UI.setIOUThreshold( self.intelligenceHelper.iou_threshold) # if not, use the default value in the function as the previous value else: self.intelligenceHelper.iou_threshold = self.segmentation_options_UI.setIOUThreshold() def selectClasses(self): print(" from intelligenceHelper:" + str(self.intelligenceHelper.selectedclasses)) self.intelligenceHelper.selectedclasses = self.segmentation_options_UI.selectClasses() def mergeSegModels(self): print(" from intelligenceHelper:" + str(self.intelligenceHelper.selectedmodels)) self.intelligenceHelper.selectedmodels = self.merge_feature_UI.mergeSegModels() # check if the user selected any models if len(self.intelligenceHelper.selectedmodels) == 0: print("No models selected") else: self.multi_model_flag = True def Segment_anything(self): # check the visibility of the sam toolbar if self.sam_toolbar.isVisible(): self.set_sam_toolbar_visibility(False) else: self.set_sam_toolbar_visibility(True) # VIDEO PROCESSING FUNCTIONS (ALL CONNECTED TO THE VIDEO PROCESSING TOOLBAR) def calculate_trajectories(self, frames=None): """ Summary: Calculate trajectories for all objects in the video Args: frames (list): list of frames to calculate trajectories for (default: None -> all frames) """ listObj = self.load_objects_from_json__orjson() if len(listObj) == 0: return frames = frames if frames else range(len(listObj)) for i in frames: listobjframe = listObj[i]['frame_idx'] for object in listObj[i]['frame_data']: id = object['tracker_id'] self.minID = min(self.minID, id - 1) self.rec_frame_for_id(id, listobjframe) label = object['class_name'] label_ascii = sum([ord(c) for c in label]) idx = label_ascii % len(color_palette) color = color_palette[idx] center = mathOps.centerOFmass(object['segment']) try: centers_rec = self.CURRENT_ANNOATAION_TRAJECTORIES['id_' + str( id)] try: (xp, yp) = centers_rec[listobjframe - 2] (xn, yn) = center if (xp == -1 or xn == -1): raise ValueError # skip the smoothing below when either center is unknown r = 0.5 x = r * xn + (1 - r) * xp y = r * yn + (1 - r) * yp center = (int(x), int(y)) except: pass centers_rec[listobjframe - 1] = center self.CURRENT_ANNOATAION_TRAJECTORIES['id_' + str(id)] = centers_rec self.CURRENT_ANNOATAION_TRAJECTORIES['id_color_' + str( id)] = color except: centers_rec = [(-1, -1)] * int(self.TOTAL_VIDEO_FRAMES) centers_rec[listobjframe - 1] = center self.CURRENT_ANNOATAION_TRAJECTORIES['id_' + str(id)] = centers_rec self.CURRENT_ANNOATAION_TRAJECTORIES['id_color_' + str( id)] = color def right_click_menu(self): """ Summary: Set the right click menu according to the current annotation mode """ self.set_sam_toolbar_enable(False) self.sam_model_comboBox.setCurrentIndex(0) self.sam_buttons_colors("x") # # right click menu # 0 createMode, # 1 editMode, # 2 edit, # 3 enhance, # 4 interpolate, # 5 mark_as_key, # 6 remove_all_keyframes, # 7 scale, # 8 copyShapes, # 9 pasteShapes, # 10 copy, # 11 delete, # 12 undo, # 13 undoLastPoint, # 14 addPointToEdge, # 15 removePoint, # 16 update_curr_frame, # 17 ignore_changes mode = self.current_annotation_mode video_menu_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 14, 15, 
16, 17] image_menu_list = [0, 1, 2, 3, 10, 11, 12, 13, 14, 15] if self.current_annotation_mode == "video": self.canvas.menus[0].clear() utils.addActions( self.canvas.menus[0], (self.actions.menu[i] for i in video_menu_list)) self.menus.edit.clear() utils.addActions( self.menus.edit, (self.actions.menu[i] for i in video_menu_list)) else: self.canvas.menus[0].clear() utils.addActions( self.canvas.menus[0], (self.actions.menu[i] for i in image_menu_list)) self.menus.edit.clear() utils.addActions( self.menus.edit, (self.actions.menu[i] for i in image_menu_list)) def reset_for_new_mode(self, mode): self.CURRENT_ANNOATAION_TRAJECTORIES = {'length': 30, 'alpha': 0.70} self.key_frames.clear() self.id_frames_rec.clear() for shape in self.canvas.shapes: self.canvas.deleteShape(shape) self.resetState() self.CURRENT_SHAPES_IN_IMG = [] self.image = QtGui.QImage() self.CURRENT_FRAME_IMAGE = None self.current_annotation_mode = mode self.canvas.current_annotation_mode = mode self.right_click_menu() self.global_listObj = [] self.minID = -2 self.maxID = 0 def openVideo(self): # enable export if json file exists try: cv2.destroyWindow('video processing') except: pass if not self.mayContinue(): return videoFile = QtWidgets.QFileDialog.getOpenFileName( self, self.tr("%s - Open Video") % __appname__, ".", self.tr("Video files (*.mp4 *.avi *.mov)") ) if videoFile[0]: # clear the file list widget self.fileListWidget.clear() self.uniqLabelList.clear() self.reset_for_new_mode("video") self.CURRENT_VIDEO_NAME = videoFile[0].split( ".")[-2].split("/")[-1] self.CURRENT_VIDEO_PATH = "/".join( videoFile[0].split(".")[-2].split("/")[:-1]) json_file_name = f'{self.CURRENT_VIDEO_PATH}/{self.CURRENT_VIDEO_NAME}_tracking_results.json' if os.path.exists(json_file_name): self.actions.export.setEnabled(True) else: self.actions.export.setEnabled(False) cap = cv2.VideoCapture(videoFile[0]) self.CURRENT_VIDEO_HEIGHT = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) self.CURRENT_VIDEO_WIDTH = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) self.CAP = cap self.TOTAL_VIDEO_FRAMES = int( self.CAP.get(cv2.CAP_PROP_FRAME_COUNT)) self.CURRENT_VIDEO_FPS = self.CAP.get(cv2.CAP_PROP_FPS) self.main_video_frames_slider.setMaximum(self.TOTAL_VIDEO_FRAMES) self.frames_to_track_slider.setMaximum( self.TOTAL_VIDEO_FRAMES - self.INDEX_OF_CURRENT_FRAME) self.main_video_frames_slider.setValue(2) self.INDEX_OF_CURRENT_FRAME = 1 self.main_video_frames_slider.setValue(self.INDEX_OF_CURRENT_FRAME) # self.addToolBarBreak self.set_video_controls_visibility(True) self.update_tracking_method() self.calculate_trajectories() keys = list(self.id_frames_rec.keys()) idsORG = [int(keys[i][3:]) for i in range(len(keys))] if len(idsORG) > 0: self.maxID = max(idsORG) for option in self.vis_options: option.setEnabled(True) # disable save and save as self.actions.save.setEnabled(False) self.actions.saveAs.setEnabled(False) def openVideoFrames(self): try: video_frame_extractor_dialog = utils.VideoFrameExtractor( self._config["mute"], notification.PopUp) video_frame_extractor_dialog.exec() dir_path_name = video_frame_extractor_dialog.path_name if dir_path_name: self.target_directory = dir_path_name self.importDirImages(dir_path_name) self.set_video_controls_visibility(False) # enable Visualization Options for option in self.vis_options: if option in [self.id_checkBox, self.traj_checkBox, self.trajectory_length_lineEdit]: option.setEnabled(False) else: option.setEnabled(True) except Exception as e: MsgBox.OKmsgBox("Error", f"Error: {e}", "critical") def load_shapes_for_video_frame(self, 
json_file_name, index): # this function loads the shapes for the video frame from the json file # first we read the json file in the form of a list # we need to parse from it data for the current frame target_frame_idx = index listObj = self.load_objects_from_json__orjson() listObj = np.array(listObj) shapes = [] i = target_frame_idx - 1 frame_objects = listObj[i]['frame_data'] for object_ in frame_objects: shape = {} shape["label"] = object_["class_name"] shape["group_id"] = (object_['tracker_id']) shape["content"] = (object_['confidence']) shape["bbox"] = object_['bbox'] points = object_['segment'] points = np.array(points, np.int16).flatten().tolist() shape["points"] = points shape["shape_type"] = "polygon" shape["other_data"] = {} shape["flags"] = {} shapes.append(shape) self.CURRENT_SHAPES_IN_IMG = shapes def loadFramefromVideo(self, frame_array, index=1): self.resetState() self.canvas.setEnabled(False) self.imageData = frame_array.data self.CURRENT_FRAME_IMAGE = frame_array image = QtGui.QImage(self.imageData, self.imageData.shape[1], self.imageData.shape[0], QtGui.QImage.Format.Format_BGR888) self.image = image if self._config["keep_prev"]: prev_shapes = self.canvas.shapes flags = {k: False for k in self._config["flags"] or []} self.canvas.loadPixmap(QtGui.QPixmap.fromImage(image)) if self.TrackingMode: image = self.draw_bb_on_image(image, self.CURRENT_SHAPES_IN_IMG) self.canvas.loadPixmap(QtGui.QPixmap.fromImage(image)) if len(self.CURRENT_SHAPES_IN_IMG) > 0: self.loadLabels(self.CURRENT_SHAPES_IN_IMG) else: if self.labelFile: self.CURRENT_SHAPES_IN_IMG = self.labelFile.shapes image = self.draw_bb_on_image( image, self.CURRENT_SHAPES_IN_IMG) self.canvas.loadPixmap(QtGui.QPixmap.fromImage(image)) self.loadLabels(self.labelFile.shapes) if self.labelFile.flags is not None: flags.update(self.labelFile.flags) else: json_file_name = f'{self.CURRENT_VIDEO_PATH}/{self.CURRENT_VIDEO_NAME}_tracking_results.json' if os.path.exists(json_file_name): self.load_shapes_for_video_frame(json_file_name, index) image = self.draw_bb_on_image( image, self.CURRENT_SHAPES_IN_IMG) self.canvas.loadPixmap(QtGui.QPixmap.fromImage(image)) if len(self.CURRENT_SHAPES_IN_IMG) > 0: self.loadLabels(self.CURRENT_SHAPES_IN_IMG) self.loadFlags(flags) self.setClean() self.canvas.setEnabled(True) # set zoom values is_initial_load = not self.zoom_values if self.filename in self.zoom_values: self.zoomMode = self.zoom_values[self.filename][0] self.setZoom(self.zoom_values[self.filename][1]) elif is_initial_load or not self._config["keep_prev_scale"]: self.adjustScale(initial=True) # set scroll values self.paintCanvas() self.toggleActions(True) self.canvas.setFocus() self.status(self.tr( f'Loaded {self.CURRENT_VIDEO_NAME} frame {self.INDEX_OF_CURRENT_FRAME}')) def nextFrame_buttonClicked(self): self.update_current_frame_annotation_button_clicked() # first assert that the new value of the slider is not greater than the total number of frames new_value = self.INDEX_OF_CURRENT_FRAME + self.FRAMES_TO_SKIP if new_value >= self.TOTAL_VIDEO_FRAMES: new_value = self.TOTAL_VIDEO_FRAMES self.main_video_frames_slider.setValue(new_value) def next_1_Frame_buttonClicked(self): self.update_current_frame_annotation_button_clicked() # first assert that the new value of the slider is not greater than the total number of frames new_value = self.INDEX_OF_CURRENT_FRAME + 1 if new_value >= self.TOTAL_VIDEO_FRAMES: new_value = self.TOTAL_VIDEO_FRAMES self.main_video_frames_slider.setValue(new_value) def previousFrame_buttonClicked(self): 
self.update_current_frame_annotation_button_clicked() new_value = self.INDEX_OF_CURRENT_FRAME - self.FRAMES_TO_SKIP if new_value <= 0: new_value = 0 self.main_video_frames_slider.setValue(new_value) def previous_1_Frame_buttonclicked(self): self.update_current_frame_annotation_button_clicked() new_value = self.INDEX_OF_CURRENT_FRAME - 1 if new_value <= 0: new_value = 0 self.main_video_frames_slider.setValue(new_value) def frames_to_skip_slider_changed(self): self.FRAMES_TO_SKIP = self.frames_to_skip_slider.value() zeros = (2 - int(np.log10(self.FRAMES_TO_SKIP + 0.9))) * '0' self.frames_to_skip_label.setText( 'Jump forward/backward frames: ' + zeros + str(self.FRAMES_TO_SKIP)) def playPauseButtonClicked(self): # we can check the state of the button by checking the button text if self.playPauseButton_mode == "Play": self.playPauseButton_mode = "Pause" self.playPauseButton.setShortcut(self._config['shortcuts']['play']) self.playPauseButton.setToolTip( f'Pause ({self._config["shortcuts"]["play"]})') self.playPauseButton.setIcon( self.style().standardIcon(QtWidgets.QStyle.StandardPixmap.SP_MediaPause)) # play the video at the current fps until the user clicks pause self.play_timer = QtCore.QTimer(self) # use play_timer.timeout.connect to call a function every time the timer times out # but we need to call the function every interval of time # so we need to call the function every 1/fps seconds self.play_timer.timeout.connect(self.move_frame_by_frame) self.play_timer.start(40) # note that the timer interval is in milliseconds # while self.timer.isActive(): elif self.playPauseButton_mode == "Pause": # first stop the timer self.play_timer.stop() self.playPauseButton_mode = "Play" self.playPauseButton.setShortcut(self._config['shortcuts']['play']) self.playPauseButton.setToolTip( f'Play ({self._config["shortcuts"]["play"]})') self.playPauseButton.setIcon( self.style().standardIcon(QtWidgets.QStyle.StandardPixmap.SP_MediaPlay)) def move_frame_by_frame(self): QtWidgets.QApplication.processEvents() self.main_video_frames_slider.setValue(self.INDEX_OF_CURRENT_FRAME + 1) def main_video_frames_slider_changed(self): if self.current_annotation_mode != "video": return if self.sam_model_comboBox.currentIndex() != 0 and self.canvas.SAM_mode != "finished" and not self.TrackingMode: self.sam_clear_annotation_button_clicked() self.sam_buttons_colors("X") try: x = self.CURRENT_VIDEO_PATH except: return frame_idx = self.main_video_frames_slider.value() self.INDEX_OF_CURRENT_FRAME = frame_idx self.CAP.set(cv2.CAP_PROP_POS_FRAMES, frame_idx - 1) # setting text of labels fps = self.CAP.get(cv2.CAP_PROP_FPS) zeros = (int(np.log10(self.TOTAL_VIDEO_FRAMES + 0.9)) - int(np.log10(frame_idx + 0.9))) * '0' self.main_video_frames_label_1.setText( f'frame {zeros}{frame_idx} / {int(self.TOTAL_VIDEO_FRAMES)}') self.frame_time = mathOps.mapFrameToTime(frame_idx, fps) frame_text = ("%02d:%02d:%02d:%03d" % ( self.frame_time[0], self.frame_time[1], self.frame_time[2], self.frame_time[3])) video_duration = mathOps.mapFrameToTime(self.TOTAL_VIDEO_FRAMES, fps) video_duration_text = ("%02d:%02d:%02d:%03d" % ( video_duration[0], video_duration[1], video_duration[2], video_duration[3])) final_text = frame_text + " / " + video_duration_text self.main_video_frames_label_2.setText(f'time {final_text}') # reading the current frame from the video and loading it into the canvas success, img = self.CAP.read() if success: frame_array = np.array(img) self.loadFramefromVideo(frame_array, frame_idx) else: pass 
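# Worked example for the zero-padding above (illustrative numbers): with
# TOTAL_VIDEO_FRAMES == 1500 and frame_idx == 7, int(np.log10(1500.9)) -
# int(np.log10(7.9)) == 3 - 0, so zeros == '000' and the label reads
# 'frame 0007 / 1500'.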
    def frames_to_track_input_changed(self, text):
        try:
            value = int(text)
            if 2 <= value <= self.frames_to_track_slider.maximum():
                self.frames_to_track_slider.setValue(value)
            elif value > self.frames_to_track_slider.maximum():
                self.frames_to_track_slider.setValue(self.frames_to_track_slider.maximum())
            elif value < 2:
                self.frames_to_track_slider.setValue(1)
        except ValueError:
            pass

    def frames_to_track_slider_changed(self, value):
        self.frames_to_track_input.setText(str(value))
        self.FRAMES_TO_TRACK = self.frames_to_track_slider.value()

    def track_assigned_objects_button_clicked(self):
        # tracking assigned objects requires at least one selected label;
        # otherwise show an error message and return
        if len(self.labelList.selectedItems()) == 0:
            self.errorMessage(
                "No objects to track",
                "You need to assign at least one object to track.",
            )
            return
        self.TRACK_ASSIGNED_OBJECTS_ONLY = True
        self.track_buttonClicked()
        self.TRACK_ASSIGNED_OBJECTS_ONLY = False

    def update_gui_after_tracking(self, index):
        if index != self.FRAMES_TO_TRACK - 1:
            self.main_video_frames_slider.setValue(self.INDEX_OF_CURRENT_FRAME + 1)
            QtWidgets.QApplication.processEvents()

    def certain_area_clicked(self, index):
        self.canvas.cancelManualDrawing()
        self.setEditMode()
        self.canvas.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.CrossCursor))
        if index == 0:
            self.canvas.tracking_area = ""
            self.canvas.tracking_area_polygon = []
        else:
            self.canvas.tracking_area = "drawing"
            self.canvas.tracking_area_polygon = []

    def track_dropdown_changed(self, index):
        self.selected_option = index

    def start_tracking_button_clicked(self):
        try:
            try:
                if self.selected_option == 0:
                    self.track_buttonClicked()
                elif self.selected_option == 1:
                    self.track_assigned_objects_button_clicked()
                elif self.selected_option == 2:
                    self.track_full_video_button_clicked()
            except Exception:
                # fall back to plain tracking if the selected mode fails
                self.track_buttonClicked()
        except Exception as e:
            MsgBox.OKmsgBox("Error", f"Error: {e}", "critical")
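    # Aside: track_buttonClicked below flattens the per-shape boxes, confidences
    # and class ids into one N x 6 float tensor, [x1, y1, x2, y2, confidence,
    # class_id], before handing it to the tracker. A minimal sketch of that
    # layout with made-up values (assumes numpy and torch, as imported by this
    # module):
    #
    #     import numpy as np
    #     import torch
    #
    #     boxes = np.array([[110, 85, 340, 420]], dtype=np.float32)
    #     confidences = np.array([0.91], dtype=np.float32)
    #     class_ids = np.array([0], dtype=np.float32)
    #     dets = torch.cat(
    #         (torch.from_numpy(boxes),
    #          torch.from_numpy(confidences).unsqueeze(1),
    #          torch.from_numpy(class_ids).unsqueeze(1)),
    #         dim=1,
    #     )  # shape (1, 6), dtype float32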
    def track_buttonClicked(self):
        # disable exports and show the progress bar while tracking runs
        self.actions.export.setEnabled(False)
        self.tracking_progress_bar.setVisible(True)
        # load (or create) the JSON results file that shares the video's name
        listObj = self.load_objects_from_json__orjson()
        existing_annotation = False
        shapes = self.canvas.shapes
        tracks_to_follow = None
        if len(shapes) > 0:
            existing_annotation = True
            tracks_to_follow = []
            for shape in shapes:
                if shape.group_id is not None:
                    tracks_to_follow.append(int(shape.group_id))
        self.TrackingMode = True
        curr_frame, prev_frame = None, None
        if self.FRAMES_TO_TRACK + self.INDEX_OF_CURRENT_FRAME <= self.TOTAL_VIDEO_FRAMES:
            number_of_frames_to_track = self.FRAMES_TO_TRACK
        else:
            number_of_frames_to_track = self.TOTAL_VIDEO_FRAMES - self.INDEX_OF_CURRENT_FRAME
        self.interrupted = False
        for i in range(number_of_frames_to_track):
            QtWidgets.QApplication.processEvents()
            if self.interrupted:
                self.interrupted = False
                break
            # flush intermediate results to disk every 100 frames
            if i % 100 == 0:
                self.load_objects_to_json__orjson(listObj)
            self.tracking_progress_bar.setValue(
                int((i + 1) / number_of_frames_to_track * 100))
            if existing_annotation:
                existing_annotation = False
                shapes = self.canvas.shapes
                shapes = mathOps.convert_qt_shapes_to_shapes(shapes)
            else:
                with torch.no_grad():
                    shapes = self.annotate_one(called_from_tracking=True)
            curr_frame = self.CURRENT_FRAME_IMAGE
            if len(shapes) == 0:
                self.update_gui_after_tracking(i)
                continue
            for shape in shapes:
                if shape['content'] is None:
                    shape['content'] = 1.0
            boxes, confidences, class_ids, segments = mathOps.get_boxes_conf_classids_segments(shapes)
            boxes = np.array(boxes, dtype=int)
            confidences = np.array(confidences)
            class_ids = np.array(class_ids)
            detections = Detections(
                xyxy=boxes,
                confidence=confidences,
                class_id=class_ids,
            )
            boxes = torch.from_numpy(detections.xyxy)
            confidences = torch.from_numpy(detections.confidence)
            class_ids = torch.from_numpy(detections.class_id)
            dets = torch.cat((boxes, confidences.unsqueeze(1), class_ids.unsqueeze(1)), dim=1)
            dets = dets.to(torch.float32)
            if hasattr(self.tracker, 'tracker') and hasattr(self.tracker.tracker, 'camera_update'):
                if prev_frame is not None and curr_frame is not None:
                    # camera motion compensation
                    self.tracker.tracker.camera_update(prev_frame, curr_frame)
            prev_frame = curr_frame
            with torch.no_grad():
                org_tracks = self.tracker.update(dets.cpu(), self.CURRENT_FRAME_IMAGE)
            tracks = []
            for org_track in org_tracks:
                track = []
                # use a separate loop variable here; reusing `i` would clobber
                # the outer frame-loop counter passed to update_gui_after_tracking
                for j in range(6):
                    track.append(int(org_track[j]))
                track[4] += int(self.maxID)
                track.append(org_track[6])
                tracks.append(track)
            matched_shapes, unmatched_shapes = mathOps.match_detections_with_tracks(shapes, tracks)
            shapes = matched_shapes
            self.CURRENT_SHAPES_IN_IMG = [
                shape_ for shape_ in shapes if shape_["group_id"] is not None]
            if self.TRACK_ASSIGNED_OBJECTS_ONLY and tracks_to_follow is not None:
                try:
                    if len(self.labelList.selectedItems()) != 0:
                        tracks_to_follow = []
                        for item in self.labelList.selectedItems():
                            x = item.text()
                            i1, i2 = x.find('D'), x.find(':')
                            tracks_to_follow.append(int(x[i1 + 2:i2]))
                    self.CURRENT_SHAPES_IN_IMG = [
                        shape_ for shape_ in shapes if shape_["group_id"] in tracks_to_follow]
                except:
                    # the selected label is not a tracked object, so extracting a
                    # tracker id from its text fails; hint the user to run the
                    # tracker first so the label carries a selectable ID
                    self.errorMessage(
                        'Error',
                        'Please use the tracker on the image first so that you can select labels with IDs to track')
                    return
            # JSON output structure: the file is a list of frames; each frame is
            # a dict with 'frame_idx' and 'frame_data', where 'frame_data' is a
            # list of objects, each carrying tracker_id, bbox, confidence,
            # class_name, class_id and segment
            json_frame = {}
            json_frame.update({'frame_idx': self.INDEX_OF_CURRENT_FRAME})
            json_frame_object_list = []
            for shape in self.CURRENT_SHAPES_IN_IMG:
                self.rec_frame_for_id(
                    int(shape["group_id"]), self.INDEX_OF_CURRENT_FRAME, type_='add')
                json_tracked_object = {}
                json_tracked_object['tracker_id'] = int(shape["group_id"])
                json_tracked_object['bbox'] = [int(i) for i in shape['bbox']]
                json_tracked_object['confidence'] = shape["content"]
                json_tracked_object['class_name'] = shape["label"]
                json_tracked_object['class_id'] = coco_classes.index(
                    shape["label"]) if shape["label"] in coco_classes else -1
                points = shape["points"]
                segment = [[int(points[z]), int(points[z + 1])]
                           for z in range(0, len(points), 2)]
                json_tracked_object['segment'] = segment
                json_frame_object_list.append(json_tracked_object)
            json_frame.update({'frame_data': json_frame_object_list})
            listObj[self.INDEX_OF_CURRENT_FRAME - 1] = json_frame
            QtWidgets.QApplication.processEvents()
            self.update_gui_after_tracking(i)
        print('finished tracking for frame ', self.INDEX_OF_CURRENT_FRAME)
        self.load_objects_to_json__orjson(listObj)
        # notify the user that tracking has finished
        self._config = get_config()
        if not self._config["mute"]:
            if not self.isActiveWindow():
                notification.PopUp("Tracking Completed")
        self.TrackingMode = False
        self.labelFile = None
        # nudge the slider back and forth to force a reload of the current frame
        self.main_video_frames_slider.setValue(self.INDEX_OF_CURRENT_FRAME - 1)
        self.main_video_frames_slider.setValue(self.INDEX_OF_CURRENT_FRAME)
        self.tracking_progress_bar.hide()
        self.tracking_progress_bar.setValue(0)
        # re-enable exports
        self.actions.export.setEnabled(True)
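    # For reference, one frame entry of *_tracking_results.json as written by
    # the loop above (values invented for illustration, segment truncated):
    #
    #     {
    #         "frame_idx": 42,
    #         "frame_data": [
    #             {
    #                 "tracker_id": 7,
    #                 "bbox": [110, 85, 340, 420],   # x1, y1, x2, y2
    #                 "confidence": 0.91,
    #                 "class_name": "person",
    #                 "class_id": 0,                 # index into coco_classes
    #                 "segment": [[112, 90], [130, 88], [135, 120]],
    #             }
    #         ],
    #     }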
    def track_full_video_button_clicked(self):
        self.FRAMES_TO_TRACK = int(self.TOTAL_VIDEO_FRAMES - self.INDEX_OF_CURRENT_FRAME)
        self.track_buttonClicked()

    def set_video_controls_visibility(self, visible=False):
        # hide both video-control toolbars (and all their children) by default
        self.videoControls.setVisible(visible)
        for widget in self.videoControls.children():
            try:
                widget.setVisible(visible)
            except:
                pass
        self.videoControls_2.setVisible(visible)
        for widget in self.videoControls_2.children():
            try:
                widget.setVisible(visible)
            except:
                pass

    def traj_checkBox_changed(self):
        try:
            self.CURRENT_ANNOATAION_FLAGS["traj"] = self.traj_checkBox.isChecked()
            self.update_current_frame_annotation()
            self.main_video_frames_slider_changed()
        except:
            pass

    def mask_checkBox_changed(self):
        try:
            self.CURRENT_ANNOATAION_FLAGS["mask"] = self.mask_checkBox.isChecked()
            self.update_current_frame_annotation()
            self.main_video_frames_slider_changed()
        except:
            pass
        self.refresh_image_MODE()

    def class_checkBox_changed(self):
        try:
            self.CURRENT_ANNOATAION_FLAGS["class"] = self.class_checkBox.isChecked()
            self.update_current_frame_annotation()
            self.main_video_frames_slider_changed()
        except:
            pass
        self.refresh_image_MODE()

    def conf_checkBox_changed(self):
        try:
            self.CURRENT_ANNOATAION_FLAGS["conf"] = self.conf_checkBox.isChecked()
            self.update_current_frame_annotation()
            self.main_video_frames_slider_changed()
        except:
            pass
        self.refresh_image_MODE()

    def id_checkBox_changed(self):
        try:
            self.CURRENT_ANNOATAION_FLAGS["id"] = self.id_checkBox.isChecked()
            self.update_current_frame_annotation()
            self.main_video_frames_slider_changed()
        except:
            pass

    def bbox_checkBox_changed(self):
        try:
            self.CURRENT_ANNOATAION_FLAGS["bbox"] = self.bbox_checkBox.isChecked()
            self.update_current_frame_annotation()
            self.main_video_frames_slider_changed()
        except:
            pass
        self.refresh_image_MODE()

    def polygons_visable_checkBox_changed(self):
        try:
            self.CURRENT_ANNOATAION_FLAGS["polygons"] = self.polygons_visable_checkBox.isChecked()
            self.update_current_frame_annotation()
            for shape in self.canvas.shapes:
                self.canvas.setShapeVisible(
                    shape, self.CURRENT_ANNOATAION_FLAGS["polygons"])
        except:
            pass
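    # Aside: export_as_video_button_clicked below is built around the standard
    # OpenCV capture/writer round-trip. Stripped of the annotation logic, the
    # skeleton looks roughly like this (paths are hypothetical):
    #
    #     import cv2
    #
    #     cap = cv2.VideoCapture("input.mp4")
    #     fps = cap.get(cv2.CAP_PROP_FPS)
    #     w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    #     h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    #     out = cv2.VideoWriter("output.mp4",
    #                           cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
    #     ok, frame = cap.read()
    #     while ok:
    #         out.write(frame)          # draw overlays on `frame` here
    #         ok, frame = cap.read()
    #     cap.release()
    #     out.release()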
    def export_as_video_button_clicked(self, output_filename=None):
        self.update_current_frame_annotation()
        input_video_file_name = f'{self.CURRENT_VIDEO_PATH}/{self.CURRENT_VIDEO_NAME}.mp4'
        output_video_file_name = f'{self.CURRENT_VIDEO_PATH}/{self.CURRENT_VIDEO_NAME}_tracking_results.mp4'
        # when invoked from the button, Qt passes False as the argument and the
        # default output name is kept; any other value is treated as an explicit
        # output path
        if output_filename is not False:
            output_video_file_name = output_filename
        input_cap = cv2.VideoCapture(input_video_file_name)
        output_cap = cv2.VideoWriter(output_video_file_name,
                                     cv2.VideoWriter_fourcc(*'mp4v'),
                                     int(self.CURRENT_VIDEO_FPS),
                                     (int(self.CURRENT_VIDEO_WIDTH), int(self.CURRENT_VIDEO_HEIGHT)))
        listObj = self.load_objects_from_json__orjson()
        # TODO: add a progress bar (with percentage) for exporting video
        empty_frame = False
        empty_video = True
        for target_frame_idx in range(self.TOTAL_VIDEO_FRAMES):
            try:
                self.INDEX_OF_CURRENT_FRAME = target_frame_idx + 1
                ret, image = input_cap.read()
                shapes = []
                frame_objects = listObj[target_frame_idx]['frame_data']
                for object_ in frame_objects:
                    shape = {}
                    shape["label"] = object_['class_name']
                    shape["group_id"] = str(object_['tracker_id'])
                    shape["content"] = str(object_['confidence'])
                    shape["bbox"] = object_['bbox']
                    points = object_['segment']
                    points = np.array(points, np.int16).flatten().tolist()
                    shape["points"] = points
                    shape["shape_type"] = "polygon"
                    shape["other_data"] = {}
                    shape["flags"] = {}
                    shapes.append(shape)
                if len(shapes) == 0:
                    if not empty_frame:
                        self.waitWindow(visible=True, text='Processing...')
                        empty_frame = True
                    continue
                self.waitWindow(
                    visible=True,
                    text=f'Please Wait.\nFrame {target_frame_idx} is being exported...')
                image = self.draw_bb_on_image(image, shapes, image_qt_flag=False)
                output_cap.write(image)
                empty_frame = False
                empty_video = False
            except:
                input_cap.release()
                output_cap.release()
        input_cap.release()
        output_cap.release()
        self.waitWindow()
        try:
            # an all-empty video produces no useful output; remove the file
            if empty_video:
                os.remove(output_video_file_name)
                return False
        except:
            pass
        self.INDEX_OF_CURRENT_FRAME = self.main_video_frames_slider.value()
        # tell the user that the video has been exported
        if output_filename is False:
            MsgBox.OKmsgBox("Export Video", "Done Exporting Video")
        if output_filename is not False:
            return output_filename

    def clear_video_annotations_button_clicked(self):
        self.global_listObj = []
        self.CURRENT_ANNOATAION_TRAJECTORIES = {'length': 30, 'alpha': 0.70}
        self.key_frames.clear()
        self.id_frames_rec.clear()
        self.minID = -2
        self.maxID = 0
        for shape in self.canvas.shapes:
            self.canvas.deleteShape(shape)
        self.CURRENT_SHAPES_IN_IMG = []
        # deleting the JSON results file (named after the video) and reloading
        # the video is enough to clear all annotations
        json_file_name = f'{self.CURRENT_VIDEO_PATH}/{self.CURRENT_VIDEO_NAME}_tracking_results.json'
        if os.path.exists(json_file_name):
            os.remove(json_file_name)
        MsgBox.OKmsgBox("Clear annotations",
                        "All video frame annotations are cleared")
        # nudge the slider to force a reload of the first frame
        self.main_video_frames_slider.setValue(2)
        self.main_video_frames_slider.setValue(1)

    def update_current_frame_annotation_button_clicked(self):
        if self.sam_model_comboBox.currentIndex() != 0 and self.canvas.SAM_mode != "finished" and not self.TrackingMode:
            self.sam_clear_annotation_button_clicked()
        try:
            x = self.CURRENT_VIDEO_PATH
        except:
            # no video is open yet
            return
        self.update_current_frame_annotation()
        self.main_video_frames_slider_changed()

    def update_current_frame_annotation(self):
        if self.current_annotation_mode != "video":
            return
        listObj = self.load_objects_from_json__orjson()
        json_frame = {}
        json_frame.update({'frame_idx': self.INDEX_OF_CURRENT_FRAME})
        json_frame_object_list = []
        shapes = mathOps.convert_qt_shapes_to_shapes(self.canvas.shapes)
        for shape in shapes:
            json_tracked_object = {}
            if shape["group_id"] is not None:
                json_tracked_object['tracker_id'] = int(shape["group_id"])
            else:
                # untracked shapes get a fresh negative id
                json_tracked_object['tracker_id'] = self.minID
                self.minID -= 1
            bbox = shape["bbox"]
            bbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])]
            json_tracked_object['bbox'] = bbox
            json_tracked_object['confidence'] = str(
                shape["content"] if shape["content"] is not None else 1)
            json_tracked_object['class_name'] = shape["label"]
            json_tracked_object['class_id'] = coco_classes.index(
                shape["label"]) if shape["label"] in coco_classes else -1
            points = shape["points"]
            segment = [[int(points[z]), int(points[z + 1])]
                       for z in range(0, len(points), 2)]
            json_tracked_object['segment'] = segment
            json_frame_object_list.append(json_tracked_object)
        json_frame.update({'frame_data': json_frame_object_list})
        listObj[self.INDEX_OF_CURRENT_FRAME - 1] = json_frame
        self.load_objects_to_json__orjson(listObj)
        print("saved frame annotation")
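    # Aside: update_current_frame_annotation (and the tracking loop before it)
    # keep converting between the flat point list stored on a shape and the
    # nested segment stored in JSON. Both directions in isolation:
    #
    #     points = [10, 20, 30, 40, 50, 60]           # shape["points"]
    #     segment = [[points[z], points[z + 1]]
    #                for z in range(0, len(points), 2)]
    #     assert segment == [[10, 20], [30, 40], [50, 60]]
    #
    #     flat = [coord for xy in segment for coord in xy]
    #     assert flat == points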
    def trajectory_length_lineEdit_changed(self):
        try:
            text = self.trajectory_length_lineEdit.text()
            self.CURRENT_ANNOATAION_TRAJECTORIES['length'] = int(text) if text != '' else 1
            self.main_video_frames_slider_changed()
        except:
            pass

    def addVideoControls(self):
        # add the video-control toolbars with a custom style
        self.videoControls = QtWidgets.QToolBar()
        self.videoControls.setMovable(True)
        self.videoControls.setFloatable(True)
        self.videoControls.setObjectName("videoControls")
        self.videoControls.setStyleSheet(
            "QToolBar#videoControls { border: 50px }")
        self.addToolBar(Qt.ToolBarArea.BottomToolBarArea, self.videoControls)
        self.videoControls_2 = QtWidgets.QToolBar()
        self.videoControls_2.setMovable(True)
        self.videoControls_2.setFloatable(True)
        self.videoControls_2.setObjectName("videoControls_2")
        self.videoControls_2.setStyleSheet(
            "QToolBar#videoControls_2 { border: 50px }")
        self.addToolBar(Qt.ToolBarArea.TopToolBarArea, self.videoControls_2)

        self.frames_to_skip_slider = QtWidgets.QSlider(QtCore.Qt.Orientation.Horizontal)
        self.frames_to_skip_slider.setMinimum(1)
        self.frames_to_skip_slider.setMaximum(100)
        self.frames_to_skip_slider.setValue(3)
        self.frames_to_skip_slider.setTickPosition(QtWidgets.QSlider.TickPosition.TicksBelow)
        self.frames_to_skip_slider.setTickInterval(1)
        self.frames_to_skip_slider.setMaximumWidth(250)
        self.frames_to_skip_slider.valueChanged.connect(self.frames_to_skip_slider_changed)
        self.frames_to_skip_label = QtWidgets.QLabel()
        self.frames_to_skip_label.setStyleSheet(
            "QLabel { font-size: 10pt; font-weight: bold; }")
        self.frames_to_skip_slider.setValue(30)
        self.videoControls.addWidget(self.frames_to_skip_label)
        self.videoControls.addWidget(self.frames_to_skip_slider)

        self.previousFrame_button = QtWidgets.QPushButton()
        self.previousFrame_button.setText("<<")
        self.previousFrame_button.setShortcut(self._config['shortcuts']['prev_x'])
        self.previousFrame_button.setToolTip(
            f'Jump Backward ({self._config["shortcuts"]["prev_x"]})')
        self.previousFrame_button.clicked.connect(self.previousFrame_buttonClicked)

        self.previous_1_Frame_button = QtWidgets.QPushButton()
        self.previous_1_Frame_button.setText("<")
        self.previous_1_Frame_button.setShortcut(self._config['shortcuts']['prev_1'])
        self.previous_1_Frame_button.setToolTip(
            f'Previous Frame ({self._config["shortcuts"]["prev_1"]})')
        self.previous_1_Frame_button.clicked.connect(self.previous_1_Frame_buttonclicked)

        self.playPauseButton = QtWidgets.QPushButton()
        self.playPauseButton_mode = "Play"
        self.playPauseButton.setShortcut(self._config['shortcuts']['play'])
        self.playPauseButton.setToolTip(
            f'Play ({self._config["shortcuts"]["play"]})')
        self.playPauseButton.setIcon(
            self.style().standardIcon(QtWidgets.QStyle.StandardPixmap.SP_MediaPlay))
        self.playPauseButton.setIconSize(QtCore.QSize(22, 22))
        self.playPauseButton.setStyleSheet("QPushButton { margin: 5px;}")
        # toggle play/pause when the button is pressed
        self.playPauseButton.pressed.connect(self.playPauseButtonClicked)

        self.nextFrame_button = QtWidgets.QPushButton()
        self.nextFrame_button.setText(">>")
        self.nextFrame_button.setShortcut(self._config['shortcuts']['next_x'])
        self.nextFrame_button.setToolTip(
            f'Jump forward ({self._config["shortcuts"]["next_x"]})')
        self.nextFrame_button.clicked.connect(self.nextFrame_buttonClicked)

        self.next_1_Frame_button = QtWidgets.QPushButton()
        self.next_1_Frame_button.setText(">")
        self.next_1_Frame_button.setShortcut(self._config['shortcuts']['next_1'])
        self.next_1_Frame_button.setToolTip(
            f'Next Frame ({self._config["shortcuts"]["next_1"]})')
        self.next_1_Frame_button.clicked.connect(self.next_1_Frame_buttonClicked)

        self.videoControls.addWidget(self.previousFrame_button)
        self.videoControls.addWidget(self.previous_1_Frame_button)
        self.videoControls.addWidget(self.playPauseButton)
        self.videoControls.addWidget(self.next_1_Frame_button)
        self.videoControls.addWidget(self.nextFrame_button)

        self.main_video_frames_slider = QtWidgets.QSlider(QtCore.Qt.Orientation.Horizontal)
        self.main_video_frames_slider.setMinimum(1)
        self.main_video_frames_slider.setMaximum(100)
        self.main_video_frames_slider.setValue(2)
        self.main_video_frames_slider.setTickPosition(QtWidgets.QSlider.TickPosition.TicksBelow)
        self.main_video_frames_slider.setTickInterval(1)
        self.main_video_frames_slider.setMaximumWidth(1000)
        self.main_video_frames_slider.valueChanged.connect(self.main_video_frames_slider_changed)
        # the labels show "current frame / total frames" and
        # "current time / total time" in a bigger, bold font
        self.main_video_frames_label_1 = QtWidgets.QLabel()
        self.main_video_frames_label_2 = QtWidgets.QLabel()
        self.main_video_frames_label_1.setStyleSheet(
            "QLabel { font-size: 12pt; font-weight: bold; }")
        self.main_video_frames_label_2.setStyleSheet(
            "QLabel { font-size: 12pt; font-weight: bold; }")
        self.videoControls.addWidget(self.main_video_frames_label_1)
        self.videoControls.addWidget(self.main_video_frames_slider)
        self.videoControls.addWidget(self.main_video_frames_label_2)

        # widgets of the second toolbar: a slider plus a text input that
        # together control how many frames to track
        self.frames_to_track_slider = QtWidgets.QSlider(QtCore.Qt.Orientation.Horizontal)
        self.frames_to_track_slider.setMinimum(1)
        self.frames_to_track_slider.setMaximum(100)
        self.frames_to_track_slider.setValue(4)
        self.frames_to_track_slider.setTickPosition(QtWidgets.QSlider.TickPosition.TicksBelow)
        self.frames_to_track_slider.setTickInterval(1)
        self.frames_to_track_slider.setMaximumWidth(200)
        self.frames_to_track_slider.valueChanged.connect(self.frames_to_track_slider_changed)
        self.frames_to_track_input = QtWidgets.QLineEdit()
        self.frames_to_track_input.setText("4")
        self.frames_to_track_input.setStyleSheet("QLineEdit { font-size: 10pt; }")
        self.frames_to_track_input.setMaximumWidth(50)
        self.frames_to_track_input.textChanged.connect(self.frames_to_track_input_changed)
        self.frames_to_track_label_before = QtWidgets.QLabel("Track for")
        self.frames_to_track_label_before.setStyleSheet(
            "QLabel { font-size: 10pt; font-weight: bold; }")
        self.frames_to_track_label_after = QtWidgets.QLabel("frames")
        self.frames_to_track_label_after.setStyleSheet(
            "QLabel { font-size: 10pt; font-weight: bold; }")
        self.videoControls_2.addWidget(self.frames_to_track_label_before)
        self.videoControls_2.addWidget(self.frames_to_track_input)
self.videoControls_2.addWidget(self.frames_to_track_label_after) self.videoControls_2.addWidget(self.frames_to_track_slider) self.frames_to_track_slider.setValue(10) self.track_dropdown = QtWidgets.QComboBox() self.track_dropdown.addItems( [f"Track for selected frames", "Track Only assigned objects", "Track Full Video"]) self.track_dropdown.setCurrentIndex(0) self.track_dropdown.currentIndexChanged.connect( self.track_dropdown_changed) self.videoControls_2.addWidget(self.track_dropdown) self.start_button = QtWidgets.QPushButton("Start Tracking") self.start_button.setIcon( QtGui.QIcon("labelme/icons/start.png")) # make the icon bigger self.start_button.setIconSize(QtCore.QSize(24, 24)) self.start_button.setStyleSheet(self.buttons_text_style_sheet) self.start_button.clicked.connect(self.start_tracking_button_clicked) self.videoControls_2.addWidget(self.start_button) self.tracking_progress_bar_label = QtWidgets.QLabel() self.tracking_progress_bar_label.setStyleSheet( "QLabel { font-size: 10pt; font-weight: bold; }") self.tracking_progress_bar_label.setText("Tracking Progress") self.videoControls_2.addWidget(self.tracking_progress_bar_label) self.tracking_progress_bar = QtWidgets.QProgressBar() self.tracking_progress_bar.setMaximumWidth(300) self.tracking_progress_bar.setMinimum(0) self.tracking_progress_bar.setMaximum(100) self.tracking_progress_bar.setValue(0) self.videoControls_2.addWidget(self.tracking_progress_bar) self.track_stop_button = QtWidgets.QPushButton() self.track_stop_button.setStyleSheet( "QPushButton {font-size: 10pt; margin: 2px 5px; padding: 2px 7px;font-weight: bold; background-color: #FF9090; color: #FFFFFF;} QPushButton:hover {background-color: #FF0000;} QPushButton:disabled {background-color: #7A7A7A;}") self.track_stop_button.setStyleSheet( "QPushButton {font-size: 10pt; margin: 2px 5px; padding: 2px 7px;font-weight: bold; background-color: #FF0000; color: #FFFFFF;} QPushButton:hover {background-color: #FE4242;} QPushButton:disabled {background-color: #7A7A7A;}") self.track_stop_button.setText("Stop Tracking") self.track_stop_button.setIcon( QtGui.QIcon("labelme/icons/stop.png")) # make the icon bigger self.track_stop_button.setIconSize(QtCore.QSize(24, 24)) # self.track_stop_button.setShortcut(self._config['shortcuts']['stop']) self.track_stop_button.setToolTip( f'Stop Tracking ({self._config["shortcuts"]["stop"]})') self.track_stop_button.pressed.connect( self.Escape_clicked) self.videoControls_2.addWidget(self.track_stop_button) # add 5 checkboxes to control the CURRENT ANNOATAION FLAGS including (bbox , id , class , mask , traj) self.bbox_checkBox = QtWidgets.QCheckBox() self.bbox_checkBox.setText("bbox") self.bbox_checkBox.setChecked(True) self.bbox_checkBox.stateChanged.connect(self.bbox_checkBox_changed) self.id_checkBox = QtWidgets.QCheckBox() self.id_checkBox.setText("id") self.id_checkBox.setChecked(True) self.id_checkBox.stateChanged.connect(self.id_checkBox_changed) self.class_checkBox = QtWidgets.QCheckBox() self.class_checkBox.setText("class") self.class_checkBox.setChecked(True) self.class_checkBox.stateChanged.connect(self.class_checkBox_changed) self.conf_checkBox = QtWidgets.QCheckBox() self.conf_checkBox.setText("confidence") self.conf_checkBox.setChecked(True) self.conf_checkBox.stateChanged.connect(self.conf_checkBox_changed) self.mask_checkBox = QtWidgets.QCheckBox() self.mask_checkBox.setText("mask") self.mask_checkBox.setChecked(True) self.mask_checkBox.stateChanged.connect(self.mask_checkBox_changed) self.traj_checkBox = QtWidgets.QCheckBox() 
self.traj_checkBox.setText("trajectories") self.traj_checkBox.setChecked(False) self.traj_checkBox.stateChanged.connect(self.traj_checkBox_changed) # make qlineedit to alter the self.CURRENT_ANNOATAION_TRAJECTORIES['length'] value self.trajectory_length_lineEdit = QtWidgets.QLineEdit() self.trajectory_length_lineEdit.setText(str(30)) self.trajectory_length_lineEdit.setMaximumWidth(50) self.trajectory_length_lineEdit.editingFinished.connect( self.trajectory_length_lineEdit_changed) self.polygons_visable_checkBox = QtWidgets.QCheckBox() self.polygons_visable_checkBox.setText("show polygons") self.polygons_visable_checkBox.setChecked(True) self.polygons_visable_checkBox.stateChanged.connect( self.polygons_visable_checkBox_changed) self.vis_options = [self.id_checkBox, self.class_checkBox, self.bbox_checkBox, self.mask_checkBox, self.polygons_visable_checkBox, self.traj_checkBox, self.trajectory_length_lineEdit, self.conf_checkBox] # add to self.vis_dock self.vis_widget.setLayout(QtWidgets.QGridLayout()) self.vis_widget.layout().setContentsMargins(10, 10, 25, 10) # set padding self.vis_widget.layout().addWidget(self.id_checkBox, 0, 0) self.vis_widget.layout().addWidget(self.class_checkBox, 0, 1) self.vis_widget.layout().addWidget(self.bbox_checkBox, 1, 0) self.vis_widget.layout().addWidget(self.mask_checkBox, 1, 1) self.vis_widget.layout().addWidget(self.traj_checkBox, 2, 0) self.vis_widget.layout().addWidget(self.trajectory_length_lineEdit, 2, 1) self.vis_widget.layout().addWidget(self.polygons_visable_checkBox, 3, 0) self.vis_widget.layout().addWidget(self.conf_checkBox, 3, 1) for option in self.vis_options: option.setEnabled(False) # save current frame self.update_current_frame_annotation_button = QtWidgets.QPushButton() self.update_current_frame_annotation_button.setStyleSheet( self.buttons_text_style_sheet) self.update_current_frame_annotation_button.setText( "Apply Changes") self.update_current_frame_annotation_button.setIcon( QtGui.QIcon("labelme/icons/done.png")) # make the icon bigger self.update_current_frame_annotation_button.setIconSize( QtCore.QSize(24, 24)) self.update_current_frame_annotation_button.setShortcut( self._config['shortcuts']['update_frame']) self.update_current_frame_annotation_button.setToolTip( f'Apply changes on current frame ({self._config["shortcuts"]["update_frame"]})') self.update_current_frame_annotation_button.clicked.connect( self.update_current_frame_annotation_button_clicked) self.videoControls_2.addWidget( self.update_current_frame_annotation_button) # add a button to clear all video annotations self.clear_video_annotations_button = QtWidgets.QPushButton() self.clear_video_annotations_button.setStyleSheet( self.buttons_text_style_sheet) self.clear_video_annotations_button.setText("Clear All") self.clear_video_annotations_button.setIcon( QtGui.QIcon("labelme/icons/clear.png")) # make the icon bigger self.clear_video_annotations_button.setIconSize(QtCore.QSize(24, 24)) self.clear_video_annotations_button.setShortcut( self._config['shortcuts']['clear_annotations']) self.clear_video_annotations_button.setToolTip( f'Clears Annotations from all frames ({self._config["shortcuts"]["clear_annotations"]})') self.clear_video_annotations_button.clicked.connect( self.clear_video_annotations_button_clicked) self.videoControls_2.addWidget(self.clear_video_annotations_button) self.set_video_controls_visibility(False) def draw_bb_on_image(self, image, shapes, image_qt_flag=True): return visualizations.draw_bb_on_image(self.CURRENT_ANNOATAION_TRAJECTORIES, 
self.INDEX_OF_CURRENT_FRAME, self.CURRENT_ANNOATAION_FLAGS, self.TOTAL_VIDEO_FRAMES, image, shapes, image_qt_flag) def waitWindow(self, visible=False, text=None): if visible: self.canvas.is_loading = True if text is not None: self.canvas.loading_text = text else: self.canvas.is_loading = False self.canvas.loading_text = "Loading..." self.canvas.repaint() QtWidgets.QApplication.processEvents() def set_sam_toolbar_enable(self, enable=False): for widget in self.sam_toolbar.children(): try: widget.setEnabled(enable or widget.accessibleName( ) == 'sam_enhance_annotation_button' or widget.accessibleName() == 'sam_model_comboBox') except: pass def set_sam_toolbar_visibility(self, visible=False): if not visible: try: self.sam_clear_annotation_button_clicked() self.sam_buttons_colors("X") except: pass self.sam_toolbar.setVisible(visible) for widget in self.sam_toolbar.children(): try: widget.setVisible(visible) except: pass def addSamControls(self): # add a toolbar self.sam_toolbar = QtWidgets.QToolBar() self.sam_toolbar.setMovable(True) self.sam_toolbar.setFloatable(True) self.sam_toolbar.setObjectName("sam_toolbar") self.sam_toolbar.setStyleSheet( "QToolBar#videoControls { border: 50px }") self.addToolBar(QtCore.Qt.ToolBarArea.TopToolBarArea, self.sam_toolbar) # add a label that says "sam model" self.sam_model_label = QtWidgets.QLabel() self.sam_model_label.setText("SAM Model") self.sam_model_label.setStyleSheet( "QLabel { font-size: 10pt; font-weight: bold; }") self.sam_toolbar.addWidget(self.sam_model_label) # add a dropdown menu to select the sam model self.sam_model_comboBox = QtWidgets.QComboBox() self.sam_model_comboBox.setAccessibleName("sam_model_comboBox") # add a label inside the combobox that says "Select Model (SAM disabled)" and make it unselectable self.sam_model_comboBox.addItem("Select Model (SAM disabled)") self.sam_model_comboBox.addItems(self.sam_models()) self.sam_model_comboBox.currentIndexChanged.connect( self.sam_model_comboBox_changed) self.sam_toolbar.addWidget(self.sam_model_comboBox) # add a button for adding a point in sam self.sam_add_point_button = QtWidgets.QPushButton() self.sam_add_point_button.setStyleSheet( "QPushButton { font-size: 10pt; font-weight: bold; }") self.sam_add_point_button.setText("Add") # add icon to button self.sam_add_point_button.setIcon( QtGui.QIcon("labelme/icons/add.png")) # make the icon bigger self.sam_add_point_button.setIconSize(QtCore.QSize(24, 24)) self.sam_add_point_button.setToolTip( f'Add point ({self._config["shortcuts"]["SAM_add_point"]})') # set shortcut self.sam_add_point_button.setShortcut( self._config["shortcuts"]["SAM_add_point"]) self.sam_add_point_button.clicked.connect( self.sam_add_point_button_clicked) self.sam_toolbar.addWidget(self.sam_add_point_button) # add a button for removing a point in sam self.sam_remove_point_button = QtWidgets.QPushButton() self.sam_remove_point_button.setStyleSheet( "QPushButton { font-size: 10pt; font-weight: bold; }") self.sam_remove_point_button.setText("Remove") # add icon to button self.sam_remove_point_button.setIcon( QtGui.QIcon("labelme/icons/remove.png")) # make the icon bigger self.sam_remove_point_button.setIconSize(QtCore.QSize(24, 24)) # set hover text self.sam_remove_point_button.setToolTip( f'Remove Point ({self._config["shortcuts"]["SAM_remove_point"]})') # set shortcut self.sam_remove_point_button.setShortcut( self._config["shortcuts"]["SAM_remove_point"]) self.sam_remove_point_button.clicked.connect( self.sam_remove_point_button_clicked) 
self.sam_toolbar.addWidget(self.sam_remove_point_button) # add a button for selecting a box in sam self.sam_select_rect_button = QtWidgets.QPushButton() self.sam_select_rect_button.setStyleSheet( "QPushButton { font-size: 10pt; font-weight: bold; }") self.sam_select_rect_button.setText("Box") # add icon to button self.sam_select_rect_button.setIcon( QtGui.QIcon("labelme/icons/bbox.png")) # make the icon bigger self.sam_select_rect_button.setIconSize(QtCore.QSize(24, 24)) # set hover text self.sam_select_rect_button.setToolTip( f'Add Box ({self._config["shortcuts"]["SAM_select_rect"]})') # set shortcut self.sam_select_rect_button.setShortcut( self._config["shortcuts"]["SAM_select_rect"]) self.sam_select_rect_button.clicked.connect( self.sam_select_rect_button_clicked) self.sam_toolbar.addWidget(self.sam_select_rect_button) # add a point for clearing the annotation self.sam_clear_annotation_button = QtWidgets.QPushButton() self.sam_clear_annotation_button.setStyleSheet( "QPushButton { font-size: 10pt; font-weight: bold; }") self.sam_clear_annotation_button.setText("Clear") # add icon to button self.sam_clear_annotation_button.setIcon( QtGui.QIcon("labelme/icons/clear.png")) # make the icon bigger self.sam_clear_annotation_button.setIconSize(QtCore.QSize(24, 24)) self.sam_clear_annotation_button.setShortcut( self._config["shortcuts"]["SAM_clear"]) self.sam_clear_annotation_button.setToolTip( f'Clear points and boxes ({self._config["shortcuts"]["SAM_clear"]})') self.sam_clear_annotation_button.clicked.connect( self.sam_clear_annotation_button_clicked) self.sam_toolbar.addWidget(self.sam_clear_annotation_button) # add a point of finish object annotation self.sam_finish_annotation_button = QtWidgets.QPushButton() self.sam_finish_annotation_button.setStyleSheet( "QPushButton { font-size: 10pt; font-weight: bold; }") self.sam_finish_annotation_button.setText("Finish") # add icon to button self.sam_finish_annotation_button.setIcon( QtGui.QIcon("labelme/icons/done.png")) # make the icon bigger self.sam_finish_annotation_button.setIconSize(QtCore.QSize(24, 24)) self.sam_finish_annotation_button.clicked.connect( self.sam_finish_annotation_button_clicked) # set hover text self.sam_finish_annotation_button.setToolTip( f'Finish Annotation ({self._config["shortcuts"]["SAM_finish_annotation"]} or ENTER)') # set shortcut self.sam_finish_annotation_button.setShortcut( self._config["shortcuts"]["SAM_finish_annotation"]) self.sam_toolbar.addWidget(self.sam_finish_annotation_button) # add a point of close SAM self.sam_close_button = QtWidgets.QPushButton() self.sam_close_button.setStyleSheet( "QPushButton { font-size: 10pt; font-weight: bold; }") self.sam_close_button.setText("Manual") # add icon to button self.sam_close_button.setIcon( QtGui.QIcon("labelme/icons/objects.png")) # make the icon bigger self.sam_close_button.setIconSize(QtCore.QSize(24, 24)) self.sam_close_button.setShortcut( self._config["shortcuts"]["SAM_RESET"]) self.sam_close_button.setToolTip( f'Return to Manual Mode ({self._config["shortcuts"]["SAM_RESET"]} or ESC)') self.sam_close_button.clicked.connect( self.sam_reset_button_clicked) self.sam_toolbar.addWidget(self.sam_close_button) # add a point of replace with SAM self.sam_enhance_annotation_button = QtWidgets.QPushButton() self.sam_enhance_annotation_button.setAccessibleName( "sam_enhance_annotation_button") self.sam_enhance_annotation_button.setStyleSheet( "QPushButton { font-size: 10pt; font-weight: bold; }") self.sam_enhance_annotation_button.setText("Enhance Polygons") # add icon to 
button self.sam_enhance_annotation_button.setIcon( QtGui.QIcon("labelme/icons/SAM.png")) # make the icon bigger self.sam_enhance_annotation_button.setIconSize(QtCore.QSize(24, 24)) self.sam_enhance_annotation_button.setShortcut( self._config["shortcuts"]["SAM_enhance"]) self.sam_enhance_annotation_button.setToolTip( f'Enhance Selected Polygons with SAM ({self._config["shortcuts"]["SAM_enhance"]})') self.sam_enhance_annotation_button.clicked.connect( self.sam_enhance_annotation_button_clicked) self.sam_toolbar.addWidget(self.sam_enhance_annotation_button) self.set_sam_toolbar_enable(False) self.sam_buttons_colors("x") def updateSamControls(self): # remove all items from the combobox self.sam_model_comboBox.clear() # call the sam_models function to get all the models self.sam_model_comboBox.addItem("Select Model (SAM disabled)") self.sam_model_comboBox.addItems(self.sam_models()) def sam_reset_button_clicked(self): self.sam_clear_annotation_button_clicked() self.setCreateMode() def sam_enhance_annotation_button_clicked(self): if self.sam_model_comboBox.currentText() == "Select Model (SAM disabled)": MsgBox.OKmsgBox("SAM is disabled", "SAM is disabled.\nPlease enable SAM.") return try: same_image = self.sam_predictor.check_image( self.CURRENT_FRAME_IMAGE) except: return toBeEnhanced = self.canvas.selectedShapes if len( self.canvas.selectedShapes) > 0 else self.canvas.shapes for shape in toBeEnhanced: try: self.canvas.shapes.remove(shape) self.remLabels([shape]) except: return shapeX = mathOps.convert_qt_shapes_to_shapes([shape])[0] x1, y1, x2, y2 = shapeX["bbox"] cur_bbox, cur_segment = self.sam_enhanced_bbox_segment( self.CURRENT_FRAME_IMAGE, [x1, y1, x2, y2], 1.2, max_itr=5, forSHAPE=True) shapeX["points"] = cur_segment shapeX = mathOps.convert_shapes_to_qt_shapes([shapeX])[0] self.canvas.shapes.append(shapeX) self.addLabel(shapeX) if self.current_annotation_mode == "video": self.update_current_frame_annotation_button_clicked() else: self.sam_clear_annotation_button_clicked() self.refresh_image_MODE() self.sam_buttons_colors("X") def sam_models(self): cwd = os.getcwd() with open(cwd + '/models_menu/sam_models.json') as f: data = json.load(f) # get all files in a directory files = os.listdir(cwd + '/mmdetection/checkpoints/') models = [] for model in data: if model['checkpoint'].split('/')[-1] in files: models.append(model['name']) return models def sam_model_comboBox_changed(self): createFlag = self.canvas.mode == 0 self.canvas.cancelManualDrawing() self.sam_clear_annotation_button_clicked() self.sam_buttons_colors("X") if self.sam_model_comboBox.currentText() == "Select Model (SAM disabled)": self.set_sam_toolbar_enable(False) return model_type = self.sam_model_comboBox.currentText() self.waitWindow( visible=True, text=f'Please Wait.\n{model_type} is Loading...') with open('models_menu/sam_models.json') as f: data = json.load(f) checkpoint_path = "" for model in data: if model['name'] == model_type: checkpoint_path = model['checkpoint'] if checkpoint_path != "": self.sam_predictor = Sam_Predictor( model_type, checkpoint_path, device) try: self.sam_predictor.set_new_image(self.CURRENT_FRAME_IMAGE) except: print("please open an image first") self.waitWindow() return self.waitWindow() print("done loading model") if createFlag: self.setCreateMode() if self.sam_last_mode == "point": self.sam_add_point_button_clicked() elif self.sam_last_mode == "rectangle": self.sam_select_rect_button_clicked() else: self.setEditMode() def sam_buttons_colors(self, mode): setEnabled = False if 
self.sam_model_comboBox.currentText( ) == "Select Model (SAM disabled)" else True if not setEnabled: self.set_sam_toolbar_enable(setEnabled) self.set_sam_toolbar_colors("X") return self.set_sam_toolbar_colors(mode) def set_sam_toolbar_enable(self, setEnabled): self.sam_add_point_button.setEnabled(setEnabled) self.sam_remove_point_button.setEnabled(setEnabled) self.sam_select_rect_button.setEnabled(setEnabled) self.sam_clear_annotation_button.setEnabled(setEnabled) self.sam_finish_annotation_button.setEnabled(setEnabled) def set_sam_toolbar_colors(self, mode): red, green, blue, trans = "#2D7CFA;", "#2D7CFA;", "#2D7CFA;", "#4B515A;" hover_const = "QPushButton::hover { background-color : " disabled_const = "QPushButton:disabled { color : #7A7A7A} " style_sheet_const = "QPushButton { font-size: 10pt; font-weight: bold; color: #ffffff; background-color: " [add_style, add_hover] = [green, green] if mode == "add" else [trans, green] [remove_style, remove_hover] = [ red, red] if mode == "remove" else [trans, red] [rect_style, rect_hover] = [ green, green] if mode == "rect" else [trans, green] [clear_style, clear_hover] = [ red, red] if mode == "clear" else [trans, red] [finish_style, finish_hover] = [ blue, blue] if mode == "finish" else [trans, blue] [replace_style, replace_hover] = [ blue, blue] if mode == "replace" else [trans, blue] self.sam_add_point_button.setStyleSheet( style_sheet_const + add_style + ";}" + hover_const + add_hover + ";}" + disabled_const) self.sam_remove_point_button.setStyleSheet( style_sheet_const + remove_style + ";}" + hover_const + remove_hover + ";}" + disabled_const) self.sam_select_rect_button.setStyleSheet( style_sheet_const + rect_style + ";}" + hover_const + rect_hover + ";}" + disabled_const) self.sam_clear_annotation_button.setStyleSheet( style_sheet_const + clear_style + ";}" + hover_const + clear_hover + ";}" + disabled_const) self.sam_finish_annotation_button.setStyleSheet( style_sheet_const + finish_style + ";}" + hover_const + finish_hover + ";}" + disabled_const) self.sam_enhance_annotation_button.setStyleSheet( style_sheet_const + replace_style + ";}" + hover_const + replace_hover + ";}" + disabled_const) def sam_add_point_button_clicked(self): self.canvas.cancelManualDrawing() self.sam_last_mode = "point" self.sam_buttons_colors("add") try: same_image = self.sam_predictor.check_image( self.CURRENT_FRAME_IMAGE) except: self.sam_buttons_colors("x") return if not same_image: self.sam_clear_annotation_button_clicked() self.sam_buttons_colors("add") self.canvas.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.CrossCursor)) self.canvas.SAM_mode = "add point" def sam_remove_point_button_clicked(self): self.canvas.cancelManualDrawing() self.sam_buttons_colors("remove") try: same_image = self.sam_predictor.check_image( self.CURRENT_FRAME_IMAGE) except: self.sam_buttons_colors("x") return if not same_image: self.sam_clear_annotation_button_clicked() self.sam_buttons_colors("remove") self.canvas.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.CrossCursor)) self.canvas.SAM_mode = "remove point" def sam_select_rect_button_clicked(self): self.canvas.cancelManualDrawing() self.sam_last_mode = "rectangle" self.sam_buttons_colors("rect") try: same_image = self.sam_predictor.check_image( self.CURRENT_FRAME_IMAGE) except: self.sam_buttons_colors("x") return if not same_image: self.sam_clear_annotation_button_clicked() self.sam_buttons_colors("rect") self.canvas.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.CrossCursor)) self.canvas.SAM_mode = "select rect" def 
sam_clear_annotation_button_clicked(self): self.canvas.cancelManualDrawing() self.sam_buttons_colors("clear") self.canvas.SAM_coordinates = [] self.canvas.SAM_mode = "" self.canvas.SAM_rect = [] self.canvas.SAM_rects = [] self.current_sam_shape = None try: self.sam_predictor.clear_logit() except: pass self.labelList.clear() self.CURRENT_SHAPES_IN_IMG = mathOps.convert_qt_shapes_to_shapes( self.canvas.shapes) self.CURRENT_SHAPES_IN_IMG = self.check_sam_instance_in_shapes( self.CURRENT_SHAPES_IN_IMG) self.loadLabels(self.CURRENT_SHAPES_IN_IMG) def sam_finish_annotation_button_clicked(self): self.canvas.cancelManualDrawing() self.sam_buttons_colors("finish") # return the cursor to normal self.canvas.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.ArrowCursor)) self.canvas.SAM_coordinates = [] self.canvas.SAM_rect = [] self.canvas.SAM_rects = [] self.canvas.SAM_mode = "finished" try: self.sam_predictor.clear_logit() if len(self.current_sam_shape) == 0: return except: if self.sam_last_mode == "point": self.sam_add_point_button_clicked() elif self.sam_last_mode == "rectangle": self.sam_select_rect_button_clicked() return self.labelList.clear() sam_qt_shape = mathOps.convert_shapes_to_qt_shapes([self.current_sam_shape])[0] self.canvas.SAM_current = sam_qt_shape self.canvas.finalise(SAM_SHAPE=True) self.CURRENT_SHAPES_IN_IMG = mathOps.convert_qt_shapes_to_shapes( self.canvas.shapes) self.CURRENT_SHAPES_IN_IMG = self.check_sam_instance_in_shapes( self.CURRENT_SHAPES_IN_IMG) try: if self.current_sam_shape["group_id"] != -1: self.CURRENT_SHAPES_IN_IMG.append(self.current_sam_shape) self.rec_frame_for_id( self.current_sam_shape["group_id"], self.INDEX_OF_CURRENT_FRAME) except: pass self.loadLabels(self.CURRENT_SHAPES_IN_IMG) # self.loadLabels(self.SAM_SHAPES_IN_IMAGE, replace=False) # clear the predictor of the finished shape self.sam_predictor.clear_logit() self.canvas.SAM_coordinates = [] # explicitly clear instead of being overriden by the next shape self.current_sam_shape = None self.canvas.SAM_current = None self.canvas.SAM_mode = "" if self.current_annotation_mode == "video": self.update_current_frame_annotation_button_clicked() else: self.canvas.shapes = mathOps.convert_shapes_to_qt_shapes( self.CURRENT_SHAPES_IN_IMG) self.sam_clear_annotation_button_clicked() self.refresh_image_MODE() def check_sam_instance_in_shapes(self, shapes): if len(shapes) == 0: return [] for shape in shapes: if shape["label"] == "SAM instance": # remove the shape from the list shapes.remove(shape) return shapes def run_sam_model(self): if self.sam_predictor is None or self.sam_model_comboBox.currentText() == "Select Model (SAM disabled)": print("please select a model") return try: same_image = self.sam_predictor.check_image( self.CURRENT_FRAME_IMAGE) except: self.sam_buttons_colors("x") return # prepre the input format for SAM input_points, input_labels = mathOps.SAM_points_and_labels_from_coordinates( self.canvas.SAM_coordinates) input_boxes = mathOps.SAM_rects_to_boxes(self.canvas.SAM_rects) mask, score = self.sam_predictor.predict(point_coords=input_points, point_labels=input_labels, box=input_boxes, image=self.CURRENT_FRAME_IMAGE) points = mathOps.mask_to_polygons(mask) shape = mathOps.polygon_to_shape(points, score) self.current_sam_shape = shape self.labelList.clear() self.CURRENT_SHAPES_IN_IMG = mathOps.convert_qt_shapes_to_shapes( self.canvas.shapes) self.CURRENT_SHAPES_IN_IMG = self.check_sam_instance_in_shapes( self.CURRENT_SHAPES_IN_IMG) self.CURRENT_SHAPES_IN_IMG.append(self.current_sam_shape) 
self.loadLabels(self.CURRENT_SHAPES_IN_IMG)

    def turnOFF_SAM(self):
        if self.sam_model_comboBox.currentText() != "Select Model (SAM disabled)":
            self.sam_clear_annotation_button_clicked()
            self.sam_buttons_colors('x')
        self.set_sam_toolbar_enable(False)
        self.canvas.SAM_mode = ""
        self.canvas.SAM_coordinates = []
        self.canvas.SAM_rect = []
        self.canvas.SAM_rects = []
        self.canvas.SAM_current = None

    def turnON_SAM(self):
        if self.sam_model_comboBox.currentText() == "Select Model (SAM disabled)":
            return
        self.sam_buttons_colors("X")
        self.set_sam_toolbar_enable(True)
        self.canvas.SAM_mode = ""
        self.canvas.SAM_coordinates = []
        self.canvas.SAM_rect = []
        self.canvas.SAM_rects = []
        self.canvas.SAM_current = None

    def sam_enhanced_bbox_segment(self, frameIMAGE, cur_bbox, thresh, max_itr=5, forSHAPE=False):
        # refine a bounding box by prompting SAM with it, re-deriving the box
        # from the predicted polygon, and recursing until the area change drops
        # below `thresh` (or max_itr runs out)
        oldAREA = abs(cur_bbox[2] - cur_bbox[0]) * abs(cur_bbox[3] - cur_bbox[1])
        [x1, y1, x2, y2] = [cur_bbox[0], cur_bbox[1], cur_bbox[2], cur_bbox[3]]
        listPOINTS = [min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2)]
        listPOINTS = [int(round(x)) for x in listPOINTS]
        input_boxes = [listPOINTS]
        mask, score = self.sam_predictor.predict(point_coords=None, point_labels=None,
                                                 box=input_boxes, image=frameIMAGE)
        points = mathOps.mask_to_polygons(mask)
        SAMshape = mathOps.polygon_to_shape(points, score)
        cur_segment = SAMshape['points']
        cur_segment = [[int(cur_segment[i]), int(cur_segment[i + 1])]
                       for i in range(0, len(cur_segment), 2)]
        cur_bbox = [min(np.array(cur_segment)[:, 0]), min(np.array(cur_segment)[:, 1]),
                    max(np.array(cur_segment)[:, 0]), max(np.array(cur_segment)[:, 1])]
        cur_bbox = [int(round(x)) for x in cur_bbox]
        newAREA = abs(cur_bbox[2] - cur_bbox[0]) * abs(cur_bbox[3] - cur_bbox[1])
        bigger, smaller = max(oldAREA, newAREA), min(oldAREA, newAREA)
        if bigger / smaller < thresh or max_itr == 1:
            if forSHAPE:
                return cur_bbox, SAMshape['points']
            else:
                return cur_bbox, cur_segment
        else:
            return self.sam_enhanced_bbox_segment(frameIMAGE, cur_bbox, thresh, max_itr - 1, forSHAPE)

    def load_objects_from_json__json(self):
        if self.global_listObj != []:
            return self.global_listObj
        json_file_name = f'{self.CURRENT_VIDEO_PATH}/{self.CURRENT_VIDEO_NAME}_tracking_results.json'
        return mathOps.load_objects_from_json__json(json_file_name, self.TOTAL_VIDEO_FRAMES)

    def load_objects_to_json__json(self, listObj):
        self.global_listObj = listObj
        json_file_name = f'{self.CURRENT_VIDEO_PATH}/{self.CURRENT_VIDEO_NAME}_tracking_results.json'
        mathOps.load_objects_to_json__json(json_file_name, listObj)

    def load_objects_from_json__orjson(self):
        if self.global_listObj != []:
            return self.global_listObj
        json_file_name = f'{self.CURRENT_VIDEO_PATH}/{self.CURRENT_VIDEO_NAME}_tracking_results.json'
        return mathOps.load_objects_from_json__orjson(json_file_name, self.TOTAL_VIDEO_FRAMES)

    def load_objects_to_json__orjson(self, listObj):
        self.global_listObj = listObj
        json_file_name = f'{self.CURRENT_VIDEO_PATH}/{self.CURRENT_VIDEO_NAME}_tracking_results.json'
        mathOps.load_objects_to_json__orjson(json_file_name, listObj)
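    # Aside: the __orjson helpers above delegate to
    # labelme/utils/helpers/mathOps.py. The sketch below shows one plausible
    # shape of that round-trip; it is an assumption, not the actual
    # implementation: return a padded list of empty frames when the file does
    # not exist, otherwise parse it whole with orjson.
    #
    #     import os
    #     import orjson
    #
    #     def load_objects_from_json__orjson(path, total_frames):
    #         if not os.path.exists(path):
    #             return [{"frame_idx": i + 1, "frame_data": []}
    #                     for i in range(int(total_frames))]
    #         with open(path, "rb") as f:
    #             return orjson.loads(f.read())
    #
    #     def load_objects_to_json__orjson(path, list_obj):
    #         with open(path, "wb") as f:
    #             f.write(orjson.dumps(list_obj))  # orjson.dumps returns bytes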
    ############################# important parameters across the gui ############################################
    # INDEX_OF_CURRENT_FRAME
    # self.FRAMES_TO_SKIP                  # frames to track
    # self.TOTAL_VIDEO_FRAMES
    # self.CURRENT_VIDEO_FPS --> to be used to play the video at the correct speed
    # self.CAP
    # self.CLASS_NAMES_DICT
    # self.CURRENT_FRAME_IMAGE
    # self.CURRENT_VIDEO_NAME
    # self.CURRENT_VIDEO_PATH
    # self.CURRENT_SHAPES_IN_IMG
    # self.CURRENT_ANNOATAION_FLAGS = {"traj": False,
    #                                  "bbox": False,
    #                                  "id": False,
    #                                  "class": True,
    #                                  "mask": True}
    # to do: remove the video processing toolbar in the other cases
    ##############################################################################################################
    ########################################## FIXME #############################################################
    # before line 63
    # - [medium] Set max zoom value to something big enough for FitWidth/Window
    # TODO(unknown):
    # - [high] Add polygon movement with arrow keys
    # - [high] Deselect shape when clicking and already selected(?)
    # - [low,maybe] Preview images on file dialogs.
    # - Zoom is too "steppy".
    ##############################################################################################################
    ######################################## Tracking Thread #####################################################
    # class TrackingThread(QThread):
    #     def __init__(self, parent=None):
    #         super(TrackingThread, self).__init__(parent)
    #         self.parent = parent
    #     def run(self):
    #         self.parent.track_buttonClicked()
    # def track_buttonClicked_wrapper(self):
    #     # ...
    #     # Disable the track button
    #     # self.actions.track.setEnabled(False)
    #     # Create a thread to run the tracking process
    #     self.thread = TrackingThread(parent=self)
    #     self.thread.start()
    ##############################################################################################################
    ####################################### trackButtonClicked ###################################################
    # import psutil
    # if self.INDEX_OF_CURRENT_FRAME % 10 == 0:
    #     print(
    #         f"Total Memory: {psutil.virtual_memory().total / 1024 ** 3} GB | Free Memory: {psutil.virtual_memory().free / 1024 ** 3} GB | Percent Used: {psutil.virtual_memory().percent} %")
    ##############################################################################################################
    ########################################## remove unnecessary use ############################################
    # update_current_frame_annotation(self)
    ##############################################################################################################

================================================ FILE: DLTA_AI_app/labelme/cli/__init__.py ================================================
# flake8: noqa
from . import draw_json
from . import draw_label_png
from . import json_to_dataset
from .
import on_docker ================================================ FILE: DLTA_AI_app/labelme/cli/draw_json.py ================================================ #!/usr/bin/env python import argparse import sys import imgviz import matplotlib.pyplot as plt from labelme.label_file import LabelFile from labelme import utils PY2 = sys.version_info[0] == 2 def main(): parser = argparse.ArgumentParser() parser.add_argument("json_file") args = parser.parse_args() label_file = LabelFile(args.json_file) img = utils.img_data_to_arr(label_file.imageData) label_name_to_value = {"_background_": 0} for shape in sorted(label_file.shapes, key=lambda x: x["label"]): label_name = shape["label"] if label_name in label_name_to_value: label_value = label_name_to_value[label_name] else: label_value = len(label_name_to_value) label_name_to_value[label_name] = label_value lbl, _ = utils.shapes_to_label( img.shape, label_file.shapes, label_name_to_value ) label_names = [None] * (max(label_name_to_value.values()) + 1) for name, value in label_name_to_value.items(): label_names[value] = name lbl_viz = imgviz.label2rgb( label=lbl, img=imgviz.asgray(img), label_names=label_names, font_size=30, loc="rb", ) plt.subplot(121) plt.imshow(img) plt.subplot(122) plt.imshow(lbl_viz) plt.show() if __name__ == "__main__": main() ================================================ FILE: DLTA_AI_app/labelme/cli/draw_label_png.py ================================================ import argparse import imgviz import matplotlib.pyplot as plt import numpy as np import PIL.Image from labelme.logger import logger def main(): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("label_png", help="label PNG file") args = parser.parse_args() lbl = np.asarray(PIL.Image.open(args.label_png)) logger.info("label shape: {}".format(lbl.shape)) logger.info("unique label values: {}".format(np.unique(lbl))) lbl_viz = imgviz.label2rgb(lbl) plt.imshow(lbl_viz) plt.show() if __name__ == "__main__": main() ================================================ FILE: DLTA_AI_app/labelme/cli/json_to_dataset.py ================================================ import argparse import base64 import json import os import os.path as osp import imgviz import PIL.Image from labelme.logger import logger from labelme import utils def main(): logger.warning( "This script is aimed to demonstrate how to convert the " "JSON file to a single image dataset." ) logger.warning( "It won't handle multiple JSON files to generate a " "real-use dataset." 
) parser = argparse.ArgumentParser() parser.add_argument("json_file") parser.add_argument("-o", "--out", default=None) args = parser.parse_args() json_file = args.json_file if args.out is None: out_dir = osp.basename(json_file).replace(".", "_") out_dir = osp.join(osp.dirname(json_file), out_dir) else: out_dir = args.out if not osp.exists(out_dir): os.mkdir(out_dir) data = json.load(open(json_file)) imageData = data.get("imageData") if not imageData: imagePath = os.path.join(os.path.dirname(json_file), data["imagePath"]) with open(imagePath, "rb") as f: imageData = f.read() imageData = base64.b64encode(imageData).decode("utf-8") img = utils.img_b64_to_arr(imageData) label_name_to_value = {"_background_": 0} for shape in sorted(data["shapes"], key=lambda x: x["label"]): label_name = shape["label"] if label_name in label_name_to_value: label_value = label_name_to_value[label_name] else: label_value = len(label_name_to_value) label_name_to_value[label_name] = label_value lbl, _ = utils.shapes_to_label( img.shape, data["shapes"], label_name_to_value ) label_names = [None] * (max(label_name_to_value.values()) + 1) for name, value in label_name_to_value.items(): label_names[value] = name lbl_viz = imgviz.label2rgb( label=lbl, img=imgviz.asgray(img), label_names=label_names, loc="rb" ) PIL.Image.fromarray(img).save(osp.join(out_dir, "img.png")) utils.lblsave(osp.join(out_dir, "label.png"), lbl) PIL.Image.fromarray(lbl_viz).save(osp.join(out_dir, "label_viz.png")) with open(osp.join(out_dir, "label_names.txt"), "w") as f: for lbl_name in label_names: f.write(lbl_name + "\n") logger.info("Saved to: {}".format(out_dir)) if __name__ == "__main__": main() ================================================ FILE: DLTA_AI_app/labelme/cli/on_docker.py ================================================ #!/usr/bin/env python from __future__ import print_function import argparse import distutils.spawn import json import os import os.path as osp import platform import shlex import subprocess import sys def get_ip(): dist = platform.platform().split("-")[0] if dist == "Linux": return "" elif dist == "Darwin": cmd = "ifconfig en0" output = subprocess.check_output(shlex.split(cmd)) if str != bytes: # Python3 output = output.decode("utf-8") for row in output.splitlines(): cols = row.strip().split(" ") if cols[0] == "inet": ip = cols[1] return ip else: raise RuntimeError("No ip is found.") else: raise RuntimeError("Unsupported platform.") def labelme_on_docker(in_file, out_file): ip = get_ip() cmd = "xhost + %s" % ip subprocess.check_output(shlex.split(cmd)) if out_file: out_file = osp.abspath(out_file) if osp.exists(out_file): raise RuntimeError("File exists: %s" % out_file) else: open(osp.abspath(out_file), "w") cmd = ( "docker run -it --rm" " -e DISPLAY={0}:0" " -e QT_X11_NO_MITSHM=1" " -v /tmp/.X11-unix:/tmp/.X11-unix" " -v {1}:{2}" " -w /home/developer" ) in_file_a = osp.abspath(in_file) in_file_b = osp.join("/home/developer", osp.basename(in_file)) cmd = cmd.format( ip, in_file_a, in_file_b, ) if out_file: out_file_a = osp.abspath(out_file) out_file_b = osp.join("/home/developer", osp.basename(out_file)) cmd += " -v {0}:{1}".format(out_file_a, out_file_b) cmd += " wkentaro/labelme labelme {0}".format(in_file_b) if out_file: cmd += " -O {0}".format(out_file_b) subprocess.call(shlex.split(cmd)) if out_file: try: json.load(open(out_file)) return out_file except Exception: if open(out_file).read() == "": os.remove(out_file) raise RuntimeError("Annotation is cancelled.") def main(): parser = argparse.ArgumentParser() 
parser.add_argument("in_file", help="Input file or directory.") parser.add_argument("-O", "--output") args = parser.parse_args() if not distutils.spawn.find_executable("docker"): print("Please install docker", file=sys.stderr) sys.exit(1) try: out_file = labelme_on_docker(args.in_file, args.output) if out_file: print("Saved to: %s" % out_file) except RuntimeError as e: sys.stderr.write(e.__str__() + "\n") sys.exit(1) if __name__ == "__main__": main() ================================================ FILE: DLTA_AI_app/labelme/config/__init__.py ================================================ import os.path as osp import shutil import yaml from labelme.logger import logger here = osp.dirname(osp.abspath(__file__)) def update_dict(target_dict, new_dict, validate_item=None): for key, value in new_dict.items(): if validate_item: validate_item(key, value) if key not in target_dict: logger.warn("Skipping unexpected key in config: {}".format(key)) continue if isinstance(target_dict[key], dict) and isinstance(value, dict): update_dict(target_dict[key], value, validate_item=validate_item) else: target_dict[key] = value # ----------------------------------------------------------------------------- def get_default_config(): config_file = osp.join(here, "default_config.yaml") with open(config_file) as f: config = yaml.safe_load(f) # save default config to ~/.labelmerc user_config_file = osp.join(osp.expanduser("~"), ".labelmerc") if not osp.exists(user_config_file): try: shutil.copy(config_file, user_config_file) except Exception: logger.warn("Failed to save config: {}".format(user_config_file)) return config def validate_config_item(key, value): if key == "validate_label" and value not in [None, "exact"]: raise ValueError( "Unexpected value for config key 'validate_label': {}".format( value ) ) if key == "shape_color" and value not in [None, "auto", "manual"]: raise ValueError( "Unexpected value for config key 'shape_color': {}".format(value) ) if key == "labels" and value is not None and len(value) != len(set(value)): raise ValueError( "Duplicates are detected for config key 'labels': {}".format(value) ) def get_config(config_file_or_yaml=None, config_from_args=None): # 1. default config config = get_default_config() # 2. specified as file or yaml if config_file_or_yaml is not None: config_from_yaml = yaml.safe_load(config_file_or_yaml) if not isinstance(config_from_yaml, dict): with open(config_from_yaml) as f: logger.info( "Loading config file from: {}".format(config_from_yaml) ) config_from_yaml = yaml.safe_load(f) update_dict( config, config_from_yaml, validate_item=validate_config_item ) # 3. 
command line argument or specified config file if config_from_args is not None: update_dict( config, config_from_args, validate_item=validate_config_item ) return config ================================================ FILE: DLTA_AI_app/labelme/config/default_config.yaml ================================================ auto_save: false canvas: double_click: close num_backups: 10 default_classes: - person - bicycle - car - motorcycle - airplane - bus - train - truck - boat - traffic light - fire hydrant - stop sign - parking meter - bench - bird - cat - dog - horse - sheep - cow - elephant - bear - zebra - giraffe - backpack - umbrella - handbag - tie - suitcase - frisbee - skis - snowboard - sports ball - kite - baseball bat - baseball glove - skateboard - surfboard - tennis racket - bottle - wine glass - cup - fork - knife - spoon - bowl - banana - apple - sandwich - orange - broccoli - carrot - hot dog - pizza - donut - cake - chair - couch - potted plant - bed - dining table - toilet - tv - laptop - mouse - remote - keyboard - cell phone - microwave - oven - toaster - sink - refrigerator - book - clock - vase - scissors - teddy bear - hair drier - toothbrush default_shape_color: - 0 - 255 - 0 display_label_popup: true epsilon: 10.0 file_dock: closable: true floatable: true movable: true show: true file_search: null fit_to_content: column: true row: false flag_dock: closable: true floatable: true movable: true show: true flags: null keep_prev: false keep_prev_brightness: false keep_prev_contrast: false keep_prev_scale: false label_colors: null label_completion: startswith label_dock: closable: true floatable: true movable: true show: true label_flags: null labels: null logger_level: info mute: false shape: fill_color: - 0 - 255 - 0 - 0 hvertex_fill_color: - 255 - 255 - 255 - 255 line_color: - 0 - 255 - 0 - 128 select_fill_color: - 0 - 255 - 0 - 155 select_line_color: - 255 - 255 - 255 - 255 vertex_fill_color: - 0 - 255 - 0 - 255 shape_color: auto shape_dock: closable: true floatable: true movable: true show: true shift_auto_shape_color: 0 shortcuts: SAM_RESET: X SAM_add_point: A SAM_clear: C SAM_enhance: shift+E SAM_finish_annotation: F SAM_remove_point: R SAM_select_rect: B add_point_to_edge: Ctrl+Shift+P clear_annotations: shift+D close: Ctrl+W copy: Ctrl+C create_circle: null create_line: null create_linestrip: null create_point: null create_polygon: Ctrl+N create_rectangle: null delete_file: Ctrl+Delete delete_polygon: Delete duplicate_polygon: Ctrl+D edit_label: Ctrl+L edit_polygon: Ctrl+J export: Ctrl+E export_video: shift+V fit_width: Ctrl+Shift+F fit_window: null ignore_updates: shift+Z interpolate: shift+I mark_as_key: shift+M next_1: Right next_x: Up open: Ctrl+O open_dir: Ctrl+U open_next: null open_prev: null open_video: Ctrl+M open_video_frames: Ctrl+F paste: Ctrl+V play: Space prev_1: Left prev_x: Down quit: Ctrl+Q save: Ctrl+S save_as: Ctrl+Shift+S save_to: null scale: shift+S stop: Escape toggle_keep_prev_mode: Ctrl+P track: shift+T track_assigned: shift+A track_full: shift+F undo: Ctrl+Z undo_last_point: Backspace update_frame: shift+U zoom_in: Ctrl++ zoom_out: Ctrl+- zoom_to_original: Ctrl+0 show_cross_line: true show_label_text_field: true sort_labels: true store_data: true theme: auto validate_label: null vis_dock: closable: true floatable: true movable: true show: true ================================================ FILE: DLTA_AI_app/labelme/config/default_config_base.yaml ================================================ auto_save: false canvas: double_click: close 
num_backups: 10 default_classes: - person - bicycle - car - motorcycle - airplane - bus - train - truck - boat - traffic light - fire hydrant - stop sign - parking meter - bench - bird - cat - dog - horse - sheep - cow - elephant - bear - zebra - giraffe - backpack - umbrella - handbag - tie - suitcase - frisbee - skis - snowboard - sports ball - kite - baseball bat - baseball glove - skateboard - surfboard - tennis racket - bottle - wine glass - cup - fork - knife - spoon - bowl - banana - apple - sandwich - orange - broccoli - carrot - hot dog - pizza - donut - cake - chair - couch - potted plant - bed - dining table - toilet - tv - laptop - mouse - remote - keyboard - cell phone - microwave - oven - toaster - sink - refrigerator - book - clock - vase - scissors - teddy bear - hair drier - toothbrush default_shape_color: - 0 - 255 - 0 display_label_popup: true epsilon: 10.0 file_dock: closable: true floatable: true movable: true show: true file_search: null fit_to_content: column: true row: false flag_dock: closable: true floatable: true movable: true show: true flags: null keep_prev: false keep_prev_brightness: false keep_prev_contrast: false keep_prev_scale: false label_colors: null label_completion: startswith label_dock: closable: true floatable: true movable: true show: true label_flags: null labels: null logger_level: info mute: false shape: fill_color: - 0 - 255 - 0 - 0 hvertex_fill_color: - 255 - 255 - 255 - 255 line_color: - 0 - 255 - 0 - 128 select_fill_color: - 0 - 255 - 0 - 155 select_line_color: - 255 - 255 - 255 - 255 vertex_fill_color: - 0 - 255 - 0 - 255 shape_color: auto shape_dock: closable: true floatable: true movable: true show: true shift_auto_shape_color: 0 shortcuts: SAM_RESET: X SAM_add_point: A SAM_clear: C SAM_enhance: shift+E SAM_finish_annotation: F SAM_remove_point: R SAM_select_rect: B add_point_to_edge: Ctrl+Shift+P clear_annotations: shift+D close: Ctrl+W copy: Ctrl+C create_circle: null create_line: null create_linestrip: null create_point: null create_polygon: Ctrl+N create_rectangle: null delete_file: Ctrl+Delete delete_polygon: Delete duplicate_polygon: Ctrl+D edit_label: Ctrl+L edit_polygon: Ctrl+J export: Ctrl+E export_video: shift+V fit_width: Ctrl+Shift+F fit_window: null ignore_updates: shift+Z interpolate: shift+I mark_as_key: shift+M next_1: Right next_x: Up open: Ctrl+O open_dir: Ctrl+U open_next: null open_prev: null open_video: Ctrl+M open_video_frames : Ctrl+F paste: Ctrl+V play: Space prev_1: Left prev_x: Down quit: Ctrl+Q save: Ctrl+S save_as: Ctrl+Shift+S save_to: null scale: shift+S stop: Escape toggle_keep_prev_mode: Ctrl+P track: shift+T track_assigned: shift+A track_full: shift+F undo: Ctrl+Z undo_last_point: Backspace update_frame: shift+U zoom_in: Ctrl++ zoom_out: Ctrl+- zoom_to_original: Ctrl+0 show_cross_line: true show_label_text_field: true sort_labels: true store_data: true theme: auto validate_label: null vis_dock: closable: true floatable: true movable: true show: true ================================================ FILE: DLTA_AI_app/labelme/intelligence.py ================================================ from ultralytics import YOLO import json import time try: from inferencing import models_inference except ModuleNotFoundError: import subprocess print("The required package 'mmcv-full' is not currently installed. It will now be installed. This process may take some time. 
Note that this package will only be installed the first time you use DLTA-AI.") subprocess.run(["mim", "install", "mmcv-full==1.7.0"]) from inferencing import models_inference from labelme.label_file import LabelFile from labelme import PY2 from PyQt6.QtCore import QThread from PyQt6.QtCore import pyqtSignal as pyqtSignal from PyQt6 import QtGui from PyQt6 import QtWidgets import os import os.path as osp import warnings import yaml from .utils.helpers.mathOps import color_palette import torch from mmdet.apis import init_detector warnings.filterwarnings("ignore") from .widgets.MsgBox import OKmsgBox from .utils.helpers import mathOps coco_classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'] # a palette of distinct colors is used to draw the bounding boxes of different classes in different colors; # it should contain the familiar 12 colors (red, green, blue, yellow, cyan, magenta, white, black, gray, brown, pink, and orange) in BGR format class IntelligenceWorker(QThread): sinOut = pyqtSignal(int, int) def __init__(self, parent, images, source, multi_model_flag=False): super(IntelligenceWorker, self).__init__(parent) self.parent = parent self.source = source self.images = images self.multi_model_flag = multi_model_flag self.notif = [] def run(self): index = 0 total = len(self.images) for filename in self.images: if not self.parent.isVisible(): return if self.source.operationCanceled: return index = index + 1 json_name = osp.splitext(filename)[0] + ".json" # if os.path.exists(json_name)==False: if os.path.isdir(json_name): os.remove(json_name) try: print("Decoding "+filename) if self.multi_model_flag: s = self.source.get_shapes_of_one(filename, multi_model_flag=True) else: s = self.source.get_shapes_of_one(filename) s = mathOps.convert_shapes_to_qt_shapes(s) self.source.saveLabelFile(filename, s) except Exception as e: print(e) self.sinOut.emit(index, total) class Intelligence(): def __init__(self, parent): self.reader = models_inference() self.parent = parent self.conf_threshold = 0.3 self.iou_threshold = 0.5 with open("labelme/config/default_config.yaml") as f: self.config = yaml.load(f, Loader=yaml.FullLoader) self.default_classes = self.config["default_classes"] try: self.selectedclasses = {} for class_ in self.default_classes: if class_ in coco_classes: index = coco_classes.index(class_) self.selectedclasses[index] = class_ except: self.selectedclasses = {i: class_ for i, class_ in enumerate(coco_classes)} print("Error in loading the default classes from the config file, so all COCO classes will be used") self.selectedmodels = []
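# For example (illustrative, not in the original source): with default_classes == ["person", "car"], the loop above yields selectedclasses == {0: "person", 2: "car"}, since "person" and "car" sit at indices 0 and 2 of coco_classes.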
self.current_model_name, self.current_mm_model = self.make_mm_model("") @torch.no_grad() def make_mm_model(self, selected_model_name): try: with open("saved_models.json") as json_file: data = json.load(json_file) if selected_model_name == "": # read the saved_models.json file and import the config and checkpoint files from the first model selected_model_name = list(data.keys())[0] config = data[selected_model_name]["config"] checkpoint = data[selected_model_name]["checkpoint"] else: config = data[selected_model_name]["config"] checkpoint = data[selected_model_name]["checkpoint"] print( f'selected model : {selected_model_name} \nconfig : {config}\ncheckpoint : {checkpoint} \n') except Exception as e: OKmsgBox("Error", f"Error in loading the model\n{e}", "critical") return torch.cuda.empty_cache() if "YOLOv8" in selected_model_name: model = YOLO(checkpoint) model.fuse() return selected_model_name, model try: print(f"From the working one: {config}") model = init_detector(config, checkpoint, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")) except: print( "Error in loading the model, please check that the config and checkpoint files exist") # cfg_options= dict(iou_threshold=0.2)) # "C:\Users\Shehab\Desktop\l001\ANNOTATION_TOOL\mmdetection\mmdetection\configs\yolact\yolact_r50_1x8_coco.py" # model = init_detector("C:/Users/Shehab/Desktop/mmdetection/mmdetection/configs/detectors/htc_r50_sac_1x_coco.py", # "C:/Users/Shehab/Desktop/mmdetection/mmdetection/checkpoints/htc_r50_sac_1x_coco-bfa60c54.pth", device = torch.device("cuda")) return selected_model_name, model @torch.no_grad() def make_mm_model_more(self, selected_model_name, config, checkpoint): torch.cuda.empty_cache() print( f"Selected model is {selected_model_name}\n and config is {config}\n and checkpoint is {checkpoint}") # if YOLOv8 if "YOLOv8" in selected_model_name: try: model = YOLO(checkpoint) model.fuse() return selected_model_name, model except Exception as e: OKmsgBox("Error", f"Error in loading the model\n{e}", "critical") return # It's a MMDetection model else: try: print(f"From the new one: {config}") model = init_detector(config, checkpoint, device=torch.device( "cuda" if torch.cuda.is_available() else "cpu")) except Exception as e: OKmsgBox("Error", f"Error in loading the model\n{e}", "critical") return return selected_model_name, model def get_shapes_of_one(self, image, img_array_flag=False, multi_model_flag=False): # print(f"Threshold is {self.conf_threshold}") # results = self.reader.decode_file(img_path = filename, threshold = self.conf_threshold , selected_model_name = self.current_model_name)["results"] start_time = time.time() # if img_array_flag is true then the image is a numpy array and not a path if multi_model_flag: # to handle the case of the user selecting no models if len(self.selectedmodels) == 0: return [] self.reader.annotating_models.clear() for model_name in self.selectedmodels: self.current_model_name, self.current_mm_model = self.make_mm_model( model_name) if img_array_flag: results0, results1 = self.reader.decode_file( img=image, model=self.current_mm_model, classdict=self.selectedclasses, threshold=self.conf_threshold, img_array_flag=True) else: results0, results1 = self.reader.decode_file( img=image, model=self.current_mm_model, classdict=self.selectedclasses, threshold=self.conf_threshold) self.reader.annotating_models[model_name] = [ results0, results1] end_time = time.time() print( f"Time taken to annotate img on {self.current_model_name}: {int((end_time - start_time)*1000)} ms" + "\n") print('merging masks') results0, results1 = self.reader.merge_masks() results = self.reader.polegonise( results0, results1, classdict=self.selectedclasses, threshold=self.conf_threshold)['results'] else: if img_array_flag: results = self.reader.decode_file( img=image, model=self.current_mm_model, classdict=self.selectedclasses, threshold=self.conf_threshold, img_array_flag=True) # print(type(results)) if isinstance(results, tuple): results = self.reader.polegonise( results[0], results[1], classdict=self.selectedclasses, threshold=self.conf_threshold)['results'] else: results = results['results'] else: results = self.reader.decode_file( img=image, model=self.current_mm_model, classdict=self.selectedclasses, threshold=self.conf_threshold) if isinstance(results, tuple): results = self.reader.polegonise( results[0], results[1], classdict=self.selectedclasses, threshold=self.conf_threshold)['results'] else: results = results['results'] end_time = time.time() print( f"Time taken to annotate img on {self.current_model_name}: {int((end_time - start_time)*1000)} ms") shapes = [] for result in results: shape = {} shape["label"] = result["class"] shape["content"] = result["confidence"] shape["group_id"] = None shape["shape_type"] = "polygon" shape["bbox"] = mathOps.get_bbox_xyxy(result["seg"]) shape["flags"] = {} shape["other_data"] = {} # shape_points is result["seg"] flattened shape["points"] = [item for sublist in result["seg"] for item in sublist] shapes.append(shape) shapes, boxes, confidences, class_ids, segments = mathOps.OURnms_confidenceBased( shapes, self.iou_threshold) # self.addLabel(shape) return shapes # print the labels of the selected classes in the dialog # def updatlabellist(self): # for selectedclass in self.selectedclasses.values(): # shape = Shape() # shape.label = selectedclass # shape.content = "" # shape.shape_type="polygon" # shape.flags = {} # shape.other_data = {} # mainwindow = self.parent # mainwindow.addLabel(shape) def get_shapes_of_batch(self, images, multi_model_flag=False, notif=None): self.pd = self.startOperationDialog() self.thread = IntelligenceWorker(self.parent, images, self, multi_model_flag) self.thread.sinOut.connect(self.updateDialog) self.thread.start() self.notif = notif if notif is not None else [] def updateDialog(self, completed, total): progress = int(completed/total*100) self.pd.setLabelText(str(completed) + "/" + str(total)) self.pd.setValue(progress) if completed == total: self.onProgressDialogCanceledOrCompleted() def startOperationDialog(self): self.operationCanceled = False pd1 = QtWidgets.QProgressDialog( 'Progress', 'Cancel', 0, 100, self.parent) pd1.setLabelText('Progress') pd1.setCancelButtonText('Cancel') pd1.setRange(0, 100) pd1.setValue(0) pd1.setMinimumDuration(0) pd1.show() pd1.canceled.connect(self.onProgressDialogCanceledOrCompleted) return pd1 def onProgressDialogCanceledOrCompleted(self): try: if not self.notif[0] and not self.notif[1].isActiveWindow(): self.notif[2]("Batch Annotation Completed") except: print("Error in batch mode notification") self.operationCanceled = True if self.parent.lastOpenDir and osp.exists(self.parent.lastOpenDir): self.parent.importDirImages(self.parent.lastOpenDir) else: self.parent.loadFile(self.parent.filename) def clear_annotating_models(self): self.reader.annotating_models.clear() def saveLabelFile(self, filename, detectedShapes): lf = LabelFile() def format_shape(s): data = s.other_data.copy() data.update( dict( label=s.label.encode("utf-8") if PY2 else s.label,
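# NOTE: the fields below flatten the Qt shape into JSON-friendly values; "content" carries the detection confidence, and mathOps.flattener (presumably, from its name and the flat point lists built above) turns the point objects into a flat coordinate list.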
points=mathOps.flattener(s.points), bbox=s.bbox, group_id=s.group_id, content=s.content, shape_type=s.shape_type, flags=s.flags, ) ) return data shapes = [format_shape(item) for item in detectedShapes] imageData = LabelFile.load_image_file(filename) image = QtGui.QImage.fromData(imageData) if osp.dirname(filename) and not osp.exists(osp.dirname(filename)): os.makedirs(osp.dirname(filename)) json_name = osp.splitext(filename)[0] + ".json" imagePath = osp.relpath(filename, osp.dirname(json_name)) lf.save( filename=json_name, shapes=shapes, imagePath=imagePath, imageData=imageData, imageHeight=image.height(), imageWidth=image.width(), otherData={}, flags={}, ) ================================================ FILE: DLTA_AI_app/labelme/label_file.py ================================================ import base64 import contextlib import io import json import os.path as osp import PIL.Image from labelme import __version__ from labelme.logger import logger from labelme import PY2 from labelme import QT4 from labelme import utils PIL.Image.MAX_IMAGE_PIXELS = None @contextlib.contextmanager def open(name, mode): assert mode in ["r", "w"] if PY2: mode += "b" encoding = None else: encoding = "utf-8" yield io.open(name, mode, encoding=encoding) return class LabelFileError(Exception): pass class LabelFile(object): suffix = ".json" def __init__(self, filename=None): self.shapes = [] self.imagePath = None self.imageData = None if filename is not None: self.load(filename) self.filename = filename @staticmethod def load_image_file(filename): try: image_pil = PIL.Image.open(filename) except IOError: logger.error("Failed opening image file: {}".format(filename)) return # apply orientation to image according to exif image_pil = utils.apply_exif_orientation(image_pil) with io.BytesIO() as f: ext = osp.splitext(filename)[1].lower() if PY2 and QT4: format = "PNG" elif ext in [".jpg", ".jpeg"]: format = "JPEG" else: format = "PNG" image_pil.save(f, format=format) f.seek(0) return f.read() def load(self, filename): keys = [ "version", "imageData", "imagePath", "shapes", # polygonal annotations "flags", # image level flags "imageHeight", "imageWidth", ] shape_keys = [ "label", "points", "bbox", "group_id", "shape_type", "flags", "content" ] try: with open(filename, "r") as f: data = json.load(f) version = data.get("version") if version is None: logger.warn( "Loading JSON file ({}) of unknown version".format( filename ) ) elif version.split(".")[0] != __version__.split(".")[0]: logger.warn( "This JSON file ({}) may be incompatible with " "current labelme. 
version in file: {}, " "current version: {}".format( filename, version, __version__ ) ) if data["imageData"] is not None: imageData = base64.b64decode(data["imageData"]) if PY2 and QT4: imageData = utils.img_data_to_png_data(imageData) else: # relative path from label file to relative path from cwd imagePath = osp.join(osp.dirname(filename), data["imagePath"]) imageData = self.load_image_file(imagePath) flags = data.get("flags") or {} imagePath = data["imagePath"] self._check_image_height_and_width( base64.b64encode(imageData).decode("utf-8"), data.get("imageHeight"), data.get("imageWidth"), ) shapes = [ dict( label=s["label"], points=s["points"], bbox = s["bbox"], shape_type=s.get("shape_type", "polygon"), flags=s.get("flags", {}), content=s.get("content"), group_id=s.get("group_id"), other_data={ k: v for k, v in s.items() if k not in shape_keys }, ) for s in data["shapes"] ] except Exception as e: raise LabelFileError(e) otherData = {} for key, value in data.items(): if key not in keys: otherData[key] = value # Only replace data after everything is loaded. self.flags = flags self.shapes = shapes self.imagePath = imagePath self.imageData = imageData self.filename = filename self.otherData = otherData @staticmethod def _check_image_height_and_width(imageData, imageHeight, imageWidth): img_arr = utils.img_b64_to_arr(imageData) if imageHeight is not None and img_arr.shape[0] != imageHeight: logger.error( "imageHeight does not match with imageData or imagePath, " "so getting imageHeight from actual image." ) imageHeight = img_arr.shape[0] if imageWidth is not None and img_arr.shape[1] != imageWidth: logger.error( "imageWidth does not match with imageData or imagePath, " "so getting imageWidth from actual image." ) imageWidth = img_arr.shape[1] return imageHeight, imageWidth def save( self, filename, shapes, imagePath, imageHeight, imageWidth, imageData=None, otherData=None, flags=None, ): if imageData is not None: imageData = base64.b64encode(imageData).decode("utf-8") imageHeight, imageWidth = self._check_image_height_and_width( imageData, imageHeight, imageWidth ) if otherData is None: otherData = {} if flags is None: flags = {} data = dict( version=__version__, flags=flags, shapes=shapes, imagePath=imagePath, imageData=imageData, imageHeight=imageHeight, imageWidth=imageWidth, ) for key, value in otherData.items(): assert key not in data data[key] = value try: with open(filename, "w") as f: json.dump(data, f, ensure_ascii=False, indent=2) self.filename = filename except Exception as e: raise LabelFileError(e) @staticmethod def is_label_file(filename): return osp.splitext(filename)[1].lower() == LabelFile.suffix ================================================ FILE: DLTA_AI_app/labelme/logger.py ================================================ import datetime import logging import os import termcolor if os.name == "nt": # Windows import colorama colorama.init() from . 
import __appname__ COLORS = { "WARNING": "yellow", "INFO": "white", "DEBUG": "blue", "CRITICAL": "red", "ERROR": "red", } class ColoredFormatter(logging.Formatter): def __init__(self, fmt, use_color=True): logging.Formatter.__init__(self, fmt) self.use_color = use_color def format(self, record): levelname = record.levelname if self.use_color and levelname in COLORS: def colored(text): return termcolor.colored( text, color=COLORS[levelname], attrs=["bold"], ) record.levelname2 = colored("{:<7}".format(record.levelname)) record.message2 = colored(record.msg) asctime2 = datetime.datetime.fromtimestamp(record.created) record.asctime2 = termcolor.colored(asctime2, color="green") record.module2 = termcolor.colored(record.module, color="cyan") record.funcName2 = termcolor.colored(record.funcName, color="cyan") record.lineno2 = termcolor.colored(record.lineno, color="cyan") return logging.Formatter.format(self, record) class ColoredLogger(logging.Logger): FORMAT = ( "[%(levelname2)s] %(module2)s:%(funcName2)s:%(lineno2)s - %(message2)s" ) def __init__(self, name): logging.Logger.__init__(self, name, logging.INFO) color_formatter = ColoredFormatter(self.FORMAT) console = logging.StreamHandler() console.setFormatter(color_formatter) self.addHandler(console) return logging.setLoggerClass(ColoredLogger) logger = logging.getLogger(__appname__) ================================================ FILE: DLTA_AI_app/labelme/shape.py ================================================ import copy import math from PyQt6 import QtCore from PyQt6 import QtGui import labelme.utils # TODO(unknown): # - [opt] Store paths instead of creating new ones at each paint. DEFAULT_LINE_COLOR = QtGui.QColor(0, 255, 0, 128) # bf hovering DEFAULT_FILL_COLOR = QtGui.QColor(0, 255, 0, 128) # hovering DEFAULT_SELECT_LINE_COLOR = QtGui.QColor(255, 255, 255) # selected DEFAULT_SELECT_FILL_COLOR = QtGui.QColor(0, 255, 0, 155) # selected DEFAULT_VERTEX_FILL_COLOR = QtGui.QColor(0, 255, 0, 255) # hovering DEFAULT_HVERTEX_FILL_COLOR = QtGui.QColor(255, 255, 255, 255) # hovering class Shape(object): # Render handles as squares P_SQUARE = 0 # Render handles as circles P_ROUND = 1 # Flag for the handles we would move if dragging MOVE_VERTEX = 0 # Flag for all other handles on the current shape NEAR_VERTEX = 1 # The following class variables influence the drawing of all shape objects. line_color = DEFAULT_LINE_COLOR fill_color = DEFAULT_FILL_COLOR select_line_color = DEFAULT_SELECT_LINE_COLOR select_fill_color = DEFAULT_SELECT_FILL_COLOR vertex_fill_color = DEFAULT_VERTEX_FILL_COLOR hvertex_fill_color = DEFAULT_HVERTEX_FILL_COLOR point_type = P_ROUND point_size = 8 scale = 1.0 def __init__( self, label=None, line_color=None, shape_type=None, flags=None, group_id=None, content=None ): self.label = label self.group_id = group_id self.points = [] self.bbox = [] self.fill = False self.selected = False self.shape_type = shape_type self.flags = flags self.content = content self.other_data = {} self._highlightIndex = None self._highlightMode = self.NEAR_VERTEX self._highlightSettings = { self.NEAR_VERTEX: (4, self.P_ROUND), self.MOVE_VERTEX: (1.5, self.P_SQUARE), } self._closed = False if line_color is not None: # Override the class line_color attribute # with an object attribute. Currently this # is used for drawing the pending line a different color.
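# NOTE: assigning to self.line_color below creates an instance attribute that shadows the class-level default, so only this shape is drawn with the custom color.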
self.line_color = line_color self.shape_type = shape_type @property def shape_type(self): return self._shape_type @shape_type.setter def shape_type(self, value): if value is None: value = "polygon" if value not in [ "polygon", "rectangle", "point", "line", "circle", "linestrip", ]: raise ValueError("Unexpected shape_type: {}".format(value)) self._shape_type = value def close(self): self._closed = True def addPoint(self, point): if self.points and point == self.points[0]: self.close() else: self.points.append(point) def canAddPoint(self): return self.shape_type in ["polygon", "linestrip"] def popPoint(self): if self.points: return self.points.pop() return None def insertPoint(self, i, point): self.points.insert(i, point) def removePoint(self, i): self.points.pop(i) def isClosed(self): return self._closed def setOpen(self): self._closed = False def getRectFromLine(self, pt1, pt2): x1, y1 = pt1.x(), pt1.y() x2, y2 = pt2.x(), pt2.y() return QtCore.QRectF(x1, y1, x2 - x1, y2 - y1) def paint(self, painter): if self.points: color = ( self.select_line_color if self.selected else self.line_color ) pen = QtGui.QPen(color) # Try using integer sizes for smoother drawing(?) pen.setWidth(max(1, int(round(2.0 / self.scale)))) painter.setPen(pen) line_path = QtGui.QPainterPath() vrtx_path = QtGui.QPainterPath() if self.shape_type == "rectangle": assert len(self.points) in [1, 2] if len(self.points) == 2: rectangle = self.getRectFromLine(*self.points) line_path.addRect(rectangle) for i in range(len(self.points)): self.drawVertex(vrtx_path, i) elif self.shape_type == "circle": assert len(self.points) in [1, 2] if len(self.points) == 2: rectangle = self.getCircleRectFromLine(self.points) line_path.addEllipse(rectangle) for i in range(len(self.points)): self.drawVertex(vrtx_path, i) elif self.shape_type == "linestrip": line_path.moveTo(self.points[0]) for i, p in enumerate(self.points): line_path.lineTo(p) self.drawVertex(vrtx_path, i) else: line_path.moveTo(self.points[0]) # Uncommenting the following line will draw 2 paths # for the 1st vertex, and make it non-filled, which # may be desirable. 
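# NOTE: line_path accumulates the shape's outline while vrtx_path accumulates the vertex markers; both paths are drawn once after the loop below.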
# self.drawVertex(vrtx_path, 0) for i, p in enumerate(self.points): line_path.lineTo(p) self.drawVertex(vrtx_path, i) if self.isClosed(): line_path.lineTo(self.points[0]) painter.drawPath(line_path) painter.drawPath(vrtx_path) painter.fillPath(vrtx_path, self._vertex_fill_color) if self.fill: color = ( self.select_fill_color if self.selected else self.fill_color ) painter.fillPath(line_path, color) def drawVertex(self, path, i): d = self.point_size / self.scale shape = self.point_type point = self.points[i] if i == self._highlightIndex: size, shape = self._highlightSettings[self._highlightMode] d *= size if self._highlightIndex is not None: self._vertex_fill_color = self.hvertex_fill_color else: self._vertex_fill_color = self.vertex_fill_color if shape == self.P_SQUARE: path.addRect(point.x() - d / 2, point.y() - d / 2, d, d) elif shape == self.P_ROUND: path.addEllipse(point, d / 2.0, d / 2.0) else: assert False, "unsupported vertex shape" def nearestVertex(self, point, epsilon): min_distance = float("inf") min_i = None for i, p in enumerate(self.points): dist = labelme.utils.distance(p - point) if dist <= epsilon and dist < min_distance: min_distance = dist min_i = i return min_i def nearestEdge(self, point, epsilon): min_distance = float("inf") post_i = None for i in range(len(self.points)): line = [self.points[i - 1], self.points[i]] dist = labelme.utils.distancetoline(point, line) if dist <= epsilon and dist < min_distance: min_distance = dist post_i = i return post_i def containsPoint(self, point): return self.makePath().contains(point) def getCircleRectFromLine(self, line): """Computes parameters to draw with `QPainterPath::addEllipse`""" if len(line) != 2: return None (c, point) = line r = line[0] - line[1] d = math.sqrt(math.pow(r.x(), 2) + math.pow(r.y(), 2)) rectangle = QtCore.QRectF(c.x() - d, c.y() - d, 2 * d, 2 * d) return rectangle def makePath(self): if self.shape_type == "rectangle": path = QtGui.QPainterPath() if len(self.points) == 2: rectangle = self.getRectFromLine(*self.points) path.addRect(rectangle) elif self.shape_type == "circle": path = QtGui.QPainterPath() if len(self.points) == 2: rectangle = self.getCircleRectFromLine(self.points) path.addEllipse(rectangle) else: path = QtGui.QPainterPath(self.points[0]) for p in self.points[1:]: path.lineTo(p) return path def boundingRect(self): return self.makePath().boundingRect() def moveBy(self, offset): self.points = [p + offset for p in self.points] def moveVertexBy(self, i, offset): self.points[i] = self.points[i] + offset def highlightVertex(self, i, action): """Highlight a vertex appropriately based on the current action Args: i (int): The vertex index action (int): The action (see Shape.NEAR_VERTEX and Shape.MOVE_VERTEX) """ self._highlightIndex = i self._highlightMode = action def highlightClear(self): """Clear the highlighted point""" self._highlightIndex = None def copy(self): return copy.deepcopy(self) def __len__(self): return len(self.points) def __getitem__(self, key): return self.points[key] def __setitem__(self, key, value): self.points[key] = value ================================================ FILE: DLTA_AI_app/labelme/testing.py ================================================ import json import os.path as osp import imgviz import labelme.utils def assert_labelfile_sanity(filename): assert osp.exists(filename) data = json.load(open(filename)) assert "imagePath" in data imageData = data.get("imageData", None) if imageData is None: parent_dir = osp.dirname(filename) img_file = osp.join(parent_dir, 
data["imagePath"]) assert osp.exists(img_file) img = imgviz.io.imread(img_file) else: img = labelme.utils.img_b64_to_arr(imageData) H, W = img.shape[:2] assert H == data["imageHeight"] assert W == data["imageWidth"] assert "shapes" in data for shape in data["shapes"]: assert "label" in shape assert "points" in shape for x, y in shape["points"]: assert 0 <= x <= W assert 0 <= y <= H ================================================ FILE: DLTA_AI_app/labelme/utils/__init__.py ================================================ # flake8: noqa from ._io import lblsave from .image import apply_exif_orientation from .image import img_arr_to_b64 from .image import img_b64_to_arr from .image import img_data_to_arr from .image import img_data_to_pil from .image import img_data_to_png_data from .image import img_pil_to_data from .shape import labelme_shapes_to_label from .shape import masks_to_bboxes from .shape import polygons_to_mask from .shape import shape_to_mask from .shape import shapes_to_label from .qt import newIcon from .qt import newButton from .qt import newAction from .qt import addActions from .qt import labelValidator from .qt import struct from .qt import distance from .qt import distancetoline from .qt import fmtShortcut from .export import exportCOCO, exportCOCOvid, exportMOT, FolderDialog, parse_img_export from .model_explorer import ModelExplorerDialog from labelme.widgets.links import open_git_hub, open_license, open_guide from labelme.widgets import runtime_data_UI, preferences_UI, shortcut_selector_UI, check_updates_UI, feedback_UI from .vid_to_frames import VideoFrameExtractor ================================================ FILE: DLTA_AI_app/labelme/utils/_io.py ================================================ import os.path as osp import numpy as np import PIL.Image def lblsave(filename, lbl): import imgviz if osp.splitext(filename)[1] != ".png": filename += ".png" # Assume label ranses [-1, 254] for int32, # and [0, 255] for uint8 as VOC. if lbl.min() >= -1 and lbl.max() < 255: lbl_pil = PIL.Image.fromarray(lbl.astype(np.uint8), mode="P") colormap = imgviz.label_colormap() lbl_pil.putpalette(colormap.flatten()) lbl_pil.save(filename) else: raise ValueError( "[%s] Cannot save the pixel-wise class label as PNG. " "Please consider using the .npy format." % filename ) ================================================ FILE: DLTA_AI_app/labelme/utils/custom_exports.py ================================================ # Don't Modify These Lines # ========================================= custom_exports_list = [] # custom export class blueprint class CustomExport: """ A blueprint for defining custom exports. Attributes: file_name (str): The name of the file to export to. button_name (str): The name of the button that triggers the export. format (str): The format of the exported file. function (callable): The function that generates the export data. mode (str): The mode of the export, either "video" or "image". Methods: __call__(*args): Calls the function with the given arguments and returns the result. """ def __init__(self, file_name, button_name, format, function, mode = "video"): """ Initializes a new instance of the CustomExport class. Args: file_name (str): The name of the file to export to. button_name (str): The name of the button that triggers the export. format (str): The format of the exported file. function (callable): The function that generates the export data. mode (str): The mode of the export, either "video" or "image". 
""" self.file_name = file_name self.button_name = button_name self.format = format self.function = function self.mode = mode custom_exports_list.append(self) def __call__(self, *args): """ Calls the function with the given arguments and returns the result. Args: *args: The arguments to pass to the function. Returns: The result of calling the function with the given arguments. """ return self.function(*args) # ========================================= # Add your functions here () """ These functions must be divided as following: 1- helper functions: functions that are used by other functions and are not exported, no restrictions on them at all Example: foo() in the dummy functions below. 2- exported functions (video): functions that are exported (video mode only): they take the following arguments: results_file (str): Path to the JSON file containing the object detection results. vid_width (int): Width of the video frames. vid_height (int): Height of the video frames. annotation_path (str): Path to the output COCO annotation file. and return annotation_path (str): to check if the function is working properly. Example: bar() in the dummy functions below. 3- exported functions (image/dir): functions that are exported (image and dir mode * including video as frames *): they take the following arguments: json_paths (list): List of paths to the JSON files containing the object detection results. annotation_path (str): The path to the output file. and return annotation_path (str): to check if the function is working properly. Example: baz() in the dummy functions below. ** WARINING: All EXPORT FUNCTIONS MUST HAVE THE SAME ARGUMENTS OR ELSE THEY WILL NOT WORK. ** It's recommended to check `exports.py` file to see how the functions are called """ # ========================================= # dummy functions for testing def foo(): print("foo") def bar(results_file, vid_width, vid_height, annotation_path): foo() print("bar") print(f"Export Function Check: results_file: {results_file} | vid_width: {vid_width} | vid_height: {vid_height} | annotation_path: {annotation_path}") return annotation_path def baz(json_paths, annotation_path): foo() print("baz") print(f"Export Function Check: json_paths {json_paths} | annotation_path: {annotation_path}") return annotation_path def count_objects(json_paths, annotation_path): import matplotlib.pyplot as plt import json labels = [] counts = [] # Loop through each JSON file for i in range(len(json_paths)): with open(json_paths[i]) as f: labels.append(json_paths[i].split("time_")[-1].split(".")[0].replace("_", ":")[-5:]) # Load the JSON data data = json.load(f) inner_count = 0 for j in range(len(data["shapes"])): inner_count += 1 counts.append(inner_count) # Plot the counts plt.figure(figsize=(20, 12)) plt.plot(counts) plt.title("Number of Objects Over Time") plt.tight_layout(pad=3) plt.grid() plt.xticks(range(len(labels)), labels, rotation=90) plt.yticks(range(max(counts)+1)) plt.xlabel("Time") plt.ylabel("Number of Objects") plt.savefig(annotation_path) plt.close() return annotation_path # ========================================= # create your custom exports here # ========================================= # dummy exports for testing # CustomExport("file_name", "video test1", "format", bar, "video") # CustomExport("file_name", "image test1", "format", baz, "image") CustomExport("plot_counts", "Plot Counts", "png", count_objects, "image") # ========================================= ================================================ FILE: 
DLTA_AI_app/labelme/utils/export.py ================================================ import datetime import glob import json import os import csv import numpy as np from PyQt6.QtWidgets import QFileDialog coco_classes = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"] def center_of_polygon(polygon): """ Calculates the center of a polygon defined by a list of consecutive pairs of vertices. Args: polygon (list): A list of consecutive pairs of vertices. Returns: tuple: The center point of the polygon as a tuple of two integers. """ # Extract x and y coordinates from the list of polygon vertices x_coords = polygon[::2] # Get every other element starting from the first (x-coordinates) y_coords = polygon[1::2] # Get every other element starting from the second (y-coordinates) # Calculate the center point of the polygon center_x = sum(x_coords) / len(x_coords) # Calculate the average x-coordinate center_y = sum(y_coords) / len(y_coords) # Calculate the average y-coordinate center = [center_x, center_y] # Store the center point as a list # Find the center of the bounding box of the polygon xmin = min(x_coords) # Get the minimum x-coordinate xmax = max(x_coords) # Get the maximum x-coordinate ymin = min(y_coords) # Get the minimum y-coordinate ymax = max(y_coords) # Get the maximum y-coordinate centers_rec = [(xmin + xmax) / 2, (ymin + ymax) / 2] # Calculate the center of the bounding box # Calculate the final center point as a weighted average of the polygon center and the bounding box center (xp, yp) = centers_rec # Unpack the bounding box center coordinates (xn, yn) = center # Unpack the polygon center coordinates r = 0.5 # Set the weight for the polygon center x = r * xn + (1 - r) * xp # Calculate the weighted average x-coordinate y = r * yn + (1 - r) * yp # Calculate the weighted average y-coordinate center = (int(x), int(y)) # Store the final center point as a tuple of integers return center # Return the final center point def get_bbox(segmentation): """ Calculates the bounding box of a polygon defined by a list of consecutive pairs of x-y coordinates. Args: segmentation (list): A list of consecutive pairs of x-y coordinates that define a polygon. Returns: list: A list of four values: the minimum x and y values, and the width and height of the bounding box that encloses the polygon. """ try: x = [] y = [] # Extract x and y coordinates from the segmentation list for i in range(len(segmentation)): if i % 2 == 0: x.append(segmentation[i]) else: y.append(segmentation[i]) # Calculate the minimum x and y values, and the width and height of the bounding box return [min(x), min(y), max(x) - min(x), max(y) - min(y)] except: # If an exception occurs (e.g. 
if the segmentation list is empty), convert it into a 1D array segmentation = [item for sublist in segmentation for item in sublist] x = [] y = [] # Extract x and y coordinates from the 1D segmentation array for i in range(len(segmentation)): if i % 2 == 0: x.append(segmentation[i]) else: y.append(segmentation[i]) # Calculate the minimum x and y values, and the width and height of the bounding box return [min(x), min(y), max(x) - min(x), max(y) - min(y)] def get_area_from_polygon(polygon, mode="segmentation"): """ Calculates the area of a polygon defined by a list of consecutive pairs of x-y coordinates. Args: polygon (list): A list of consecutive pairs of x-y coordinates that define a polygon. mode (str): The mode to use for calculating the area. Can be "segmentation" (default) or "bbox". Returns: float: The area of the polygon. """ if mode == "segmentation": # Convert the list to a numpy array of shape (n, 2) where n is the number of vertices polygon = np.array(polygon).reshape(-1, 2) # Use the shoelace formula to calculate the area of the polygon area = 0.5 * np.abs(np.dot(polygon[:, 0], np.roll(polygon[:, 1], 1)) - np.dot(polygon[:, 1], np.roll(polygon[:, 0], 1))) # Return the area return area elif mode == "bbox": # Unpack the list into variables x_min, y_min, width, height = polygon # Calculate the area by multiplying the width and height area = width * height # Return the area return area else: raise ValueError("mode must be either 'segmentation' or 'bbox'") # the parsing function is called in the main export function (in app.py) before exporting in image or dir mode def parse_img_export(target_directory, save_path): import json import glob # If the target is not a directory, set the file path to the save path try: if target_directory == "": image_mode = True else: image_mode = False # Get all the JSON files in the specified directory json_paths = glob.glob(f"{target_directory}/*.json") if image_mode: json_paths = [save_path] # Raise an error if no JSON files are found in the directory if len(json_paths) == 0: raise ValueError("No json files found in the directory") except Exception as e: print(f"Error parsing image export: {e}") return None return json_paths def exportCOCO(json_paths, annotation_path): """ Export annotations in COCO format from a directory of JSON files for image and dir modes Args: json_paths (list): Paths to the JSON label files to export (a single-element list in image mode). annotation_path (str): The path to the output file. Returns: str: The path to the output file. Raises: ValueError: If no JSON files are found in the directory.
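Output sketch (illustrative, fields abridged)::

    {"info": {"description": "Exported from DLTA-AI", ...},
     "categories": [{"id": 1, "name": "person"}, ...],
     "images": [{"id": 0, "width": ..., "height": ..., "file_name": ...}, ...],
     "annotations": [{"id": 0, "image_id": 0, "category_id": 1,
                      "bbox": [x, y, w, h], "segmentation": [...],
                      "area": ..., "iscrowd": 0}, ...]}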
""" # Create a dictionary to store the file info file = {} # Write the info header file["info"] = { "description": "Exported from DLTA-AI", # "url": "n/a", # "version": "n/a", "year": datetime.datetime.now().year, # "contributor": "n/a", "date_created": datetime.date.today().strftime("%Y/%m/%d") } # Create an empty set to store the used classes used_classes = set() # Create empty lists to store annotations and images annotations = [] images = [] # Loop through each JSON file for i in range(len(json_paths)): try: with open(json_paths[i]) as f: # Load the JSON data data = json.load(f) # Add image data to the images list images.append({ "id": i, "width": data["imageWidth"], "height": data["imageHeight"], "file_name": json_paths[i].split("/")[-1].replace(".json", ".jpg"), }) # Loop through each shape in the JSON data for j in range(len(data["shapes"])): # Skip shapes with no points if len(data["shapes"][j]["points"],) == 0: continue # Add the class to the used_classes set if it hasn't been added yet if data["shapes"][j]["label"].lower() not in coco_classes: print(f"{data['shapes'][j]['label']} is not a valid COCO class.. Adding it to the list.") coco_classes.append((data["shapes"][j]["label"].lower())) # Add annotation data to the annotations list annotations.append({ "id": len(annotations), "image_id": i, "category_id": coco_classes.index(data["shapes"][j]["label"].lower()) + 1, "bbox": get_bbox(data["shapes"][j]["points"]), "iscrowd": 0 }) # Try to add segmentation and area data to the annotation try: annotations[-1]["segmentation"] = [data["shapes"][j]["points"]] annotations[-1]["area"] = get_area_from_polygon( annotations[-1]["segmentation"][0], mode="segmentation") except: annotations[-1]["area"] = get_area_from_polygon( annotations[-1]["bbox"], mode="bbox") # Try to add score data to the annotation try: annotations[-1]["score"] = float(data["shapes"][j]["content"]) except: pass # Add the class to the used_classes set used_classes.add(coco_classes.index(data["shapes"][j]["label"].lower()) + 1) # If there's an error with the JSON file, print the error and continue to the next file except Exception as e: print(f"Error with {json_paths[i]}") print(e) continue # Sort the used_classes set and add the categories to the file dictionary used_classes = sorted(used_classes) file["categories"] = [{"id": i, "name": coco_classes[i - 1]} for i in used_classes] # Add the images and annotations to the file dictionary file["images"] = images file["annotations"] = annotations # Write the file dictionary to the output file in JSON format with open(annotation_path, 'w') as outfile: json.dump(file, outfile, indent=4) # Return the path to the output file return annotation_path def exportCOCOvid(results_file, vid_width, vid_height, annotation_path): """ Export object detection results in COCO format for a video. Args: results_file (str): Path to the JSON file containing the object detection results. vid_width (int): Width of the video frames. vid_height (int): Height of the video frames. annotation_path (str): Path to the output COCO annotation file. Returns: str: Path to the output COCO annotation file. Raises: ValueError: If no object detection results are found in the JSON file. 
""" file = {} file["info"] = { "description": "Exported from DLTA-AI", # "url": "n/a", # "version": "n/a", "year": datetime.datetime.now().year, # "contributor": "n/a", "date_created": datetime.date.today().strftime("%Y/%m/%d") } annotations = [] images = [] # Create an empty set to store the used classes used_classes = set() # Open the results file and load the JSON data with open(results_file) as f: data = json.load(f) # Loop through each frame in the JSON data for frame in data: # Skip frames with no object detection results if len(frame["frame_data"]) == 0: continue # Add image data to the images list images.append({ "id": frame["frame_idx"], "width": vid_width, "height": vid_height, "file_name": f"frame {frame['frame_idx']}", }) # Loop through each object in the frame for object in frame["frame_data"]: # Add annotation data to the annotations list annotations.append({ "id": len(annotations), "image_id": frame["frame_idx"], "category_id": object["class_id"] + 1, "iscrowd": 0 }) # If the category ID is 0, add the class to the coco_classes list and update the category ID if annotations[-1]["category_id"] == 0: coco_classes.append(object["class_name"].lower()) annotations[-1]["category_id"] = coco_classes.index(object["class_name"].lower()) + 1 # Try to add the object's segmentation data to the annotation try: annotations[-1]["bbox"] = get_bbox(object["segment"]) annotations[-1]["segmentation"] = [ [val for sublist in object["segment"] for val in sublist]] annotations[-1]["area"] = get_area_from_polygon( annotations[-1]["segmentation"][0], mode="segmentation") except: # If the segmentation data is not available, use the object's bounding box data instead annotations[-1]["bbox"] = object["bbox"] annotations[-1]["area"] = get_area_from_polygon( annotations[-1]["bbox"], mode="bbox") # Try to add the object's confidence score to the annotation try: annotations[-1]["score"] = float(object["confidence"]) except: pass # Add the category ID to the used_classes set used_classes.add(annotations[-1]["category_id"]) # Sort the used_classes set and add the categories to the file dictionary used_classes = sorted(list(used_classes)) file["categories"] = [{"id": i, "name": coco_classes[i - 1]} for i in used_classes] # Add the images and annotations to the file dictionary file["images"] = images file["annotations"] = annotations # Write the file dictionary to the output file in JSON format with open(annotation_path, 'w') as outfile: json.dump(file, outfile, indent=4) # Return the path to the output file return annotation_path def exportMOT(results_file, annotation_path): """ Export object tracking results in MOT format. Args: results_file (str): Path to the JSON file containing the object tracking results. annotation_path (str): Path to the output MOT annotation file. Returns: str: Path to the output MOT annotation file. """ # Open the results file and load the JSON data with open(results_file) as f, open(annotation_path, 'w') as outfile: # Loop through each frame in the JSON data for frame in json.load(f): for object in frame["frame_data"]: # Write the object tracking data to the output file outfile.write(f'{frame["frame_idx"]}, {object["tracker_id"]}, {object["bbox"][0]}, {object["bbox"][1]}, {object["bbox"][2]}, {object["bbox"][3]}, {object["confidence"]}, {object["class_id"] + 1}, 1\n') # Return the path to the output file return annotation_path class FolderDialog(QFileDialog): """ A custom file dialog that allows the user to save a file with a default file name and format. 
Args: default_file_name (str): The default file name to use. default_format (str): The default file format to use. """ # Call the parent constructor super().__init__() # Set the mode to save a file self.setAcceptMode(QFileDialog.AcceptMode.AcceptSave) # Set the default file name self.selectFile(default_file_name) # Set the default format self.setNameFilters( [f"{default_format.upper()} (*.{default_format.lower()})", "All Files (*)"]) self.selectNameFilter( f"{default_format.upper()} (*.{default_format.lower()})") # Set dialog title self.setWindowTitle("Save Annotations") ================================================ FILE: DLTA_AI_app/labelme/utils/helpers/mathOps.py ================================================ import numpy as np import random import cv2 from PyQt6 import QtGui from PyQt6 import QtCore from labelme import PY2 import os import json import orjson import copy from shapely.geometry import Polygon import skimage from labelme.shape import Shape coco_classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'] # a palette of distinct colors is used to draw the bounding boxes of different classes in different colors; # it should contain the familiar 12 colors (red, green, blue, yellow, cyan, magenta, white, black, gray, brown, pink, and orange) in BGR format import colorsys class ColorGen: """ A class for generating colors using the HLS color model. Attributes: colors (list): A list to store the generated colors. Methods: generateColors(num, lightness, saturation): Generates the specified number of colors based on the given lightness and saturation values. """ def __init__(self): self.colors = [] def generateColors(self, num, lightness, saturation): """ Generates the specified number of colors based on the given lightness and saturation values. Args: num (int): The number of colors to generate. lightness (float): The lightness value for the generated colors (0.0 to 1.0). saturation (float): The saturation value for the generated colors (0.0 to 1.0). Returns: List of generated colors in RGB format.
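Example (illustrative)::

    palette = ColorGen().generateColors(3, 0.45, 0.6)
    # three well-separated [r, g, b] lists with values in 0..255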
""" h = 0.314159265359 golden_ratio_conjugate = 0.618033988749895 for _ in range(num): h += golden_ratio_conjugate h %= 1 rgb = colorsys.hls_to_rgb(h, lightness, saturation) self.colors.append([int(i * 255.0) for i in rgb]) return self.colors c = ColorGen() color_palette = c.generateColors(20,0.45,0.6) def get_bbox_xyxy(segment): """ Summary: Get the bounding box of a polygon in format of [xmin, ymin, xmax, ymax]. Args: segment: a list of points Returns: bbox: [x, y, w, h] """ segment = np.array(segment) x0 = np.min(segment[:, 0]) y0 = np.min(segment[:, 1]) x1 = np.max(segment[:, 0]) y1 = np.max(segment[:, 1]) return [x0, y0, x1, y1] def addPoints(shape, n): """ Summary: Add points to a polygon. Args: shape: a list of points n: number of points to add Returns: res: a list of points """ # calculate number of points to add between each pair of points sub = 1.0 * n / (len(shape) - 1) # if sub == 0, then n == 0, no need to add points if sub == 0: return shape # if sub < 1, then we a point between every pair of points then we handle the points again if sub < 1: res = [] res.append(shape[0]) for i in range(len(shape) - 1): newPoint = [(shape[i][0] + shape[i + 1][0]) / 2, (shape[i][1] + shape[i + 1][1]) / 2] res.append(newPoint) res.append(shape[i + 1]) return handlePoints(res, n + len(shape)) # if sub > 1, then we add 'toBeAdded' points between every pair of points else: toBeAdded = int(sub) + 1 res = [] res.append(shape[0]) for i in range(len(shape) - 1): dif = [shape[i + 1][0] - shape[i][0], shape[i + 1][1] - shape[i][1]] for j in range(1, toBeAdded): newPoint = [shape[i][0] + dif[0] * j / toBeAdded, shape[i][1] + dif[1] * j / toBeAdded] res.append(newPoint) res.append(shape[i + 1]) # recursive call to check if there are any points to add return addPoints(res, n + len(shape) - len(res)) def reducePoints(polygon, n): """ Summary: Remove points from a polygon. 
Args: polygon: a list of points n: number of points to reduce to Returns: polygon: a list of points """ # if n >= len(polygon), then no need to reduce if n >= len(polygon): return polygon # calculate the distance between each point and: # 1- its previous point # 2- its next point # 3- the middle point between its previous and next points # taking the minimum of these distances as the distance of the point distances = polygon.copy() for i in range(len(polygon)): x1,y1,x2,y2 = polygon[i-1][0], polygon[i-1][1], polygon[(i+1)%len(polygon)][0], polygon[(i+1)%len(polygon)][1] x,y = polygon[i][0], polygon[i][1] if x1 == x2: dist_perp = abs(x - x1) elif y1 == y2: dist_perp = abs(y - y1) else: m = (y2 - y1) / (x2 - x1) c = y1 - m * x1 dist_perp = abs(m * x - y + c) / np.sqrt(m * m + 1) dif_right = np.array( polygon[(i + 1) % len(polygon)]) - np.array(polygon[i]) dist_right = np.sqrt( dif_right[0] * dif_right[0] + dif_right[1] * dif_right[1]) dif_left = np.array(polygon[i - 1]) - np.array(polygon[i]) dist_left = np.sqrt( dif_left[0] * dif_left[0] + dif_left[1] * dif_left[1]) distances[i] = min(dist_perp, dist_right, dist_left) # adding small random values to distances to avoid duplicate minimum distances # it will not affect the result distances = [distances[i] + random.random() for i in range(len(distances))] ratio = 1.0 * n / len(polygon) threshold = np.percentile(distances, 100 - ratio * 100) i = 0 while i < len(polygon): if distances[i] < threshold: polygon[i] = None i += 1 i += 1 res = [x for x in polygon if x is not None] # recursive call to check if there are any points to remove return reducePoints(res, n) def handlePoints(polygon, n): """ Summary: Add or remove points from a polygon. Args: polygon: a list of points n: number of points that the polygon should have Returns: polygon: a list of points """ # if n == len(polygon), then no need to add or remove points if n == len(polygon): return polygon # if n > len(polygon), then we need to add points elif n > len(polygon): return addPoints(polygon, n - len(polygon)) # if n < len(polygon), then we need to remove points else: return reducePoints(polygon, n) def handleTwoSegments(segment1, segment2): """ Summary: Add or remove points from two polygons to make them have the same number of points. Args: segment1: a list of points segment2: a list of points Returns: segment1: a list of points segment2: a list of points """ if len(segment1) != len(segment2): biglen = max(len(segment1), len(segment2)) segment1 = handlePoints(segment1, biglen) segment2 = handlePoints(segment2, biglen) (segment1, segment2) = allign(segment1, segment2) return (segment1, segment2) def allign(shape1, shape2): """ Summary: Allign the points of two polygons according to their slopes. 
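
    Example (illustrative; a square's points are re-ordered by descending
    angle around its center of mass):
        >>> allign([[0, 0], [2, 0], [2, 2], [0, 2]],
        ...        [[0, 0], [2, 0], [2, 2], [0, 2]])[0]
        [[0, 2], [2, 2], [2, 0], [0, 0]]
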
Args: shape1: a list of points shape2: a list of points Returns: shape1_alligned: a list of points shape2_alligned: a list of points """ shape1_center = centerOFmass(shape1) shape1_org = [[shape1[i][0] - shape1_center[0], shape1[i] [1] - shape1_center[1]] for i in range(len(shape1))] shape2_center = centerOFmass(shape2) shape2_org = [[shape2[i][0] - shape2_center[0], shape2[i] [1] - shape2_center[1]] for i in range(len(shape2))] # sorting the points according to their slopes sorted_shape1 = sorted(shape1_org, key=lambda x: np.arctan2(x[1], x[0]), reverse=True) sorted_shape2 = sorted(shape2_org, key=lambda x: np.arctan2(x[1], x[0]), reverse=True) shape1_alligned = [[sorted_shape1[i][0] + shape1_center[0], sorted_shape1[i] [1] + shape1_center[1]] for i in range(len(sorted_shape1))] shape2_alligned = [[sorted_shape2[i][0] + shape2_center[0], sorted_shape2[i] [1] + shape2_center[1]] for i in range(len(sorted_shape2))] return (shape1_alligned, shape2_alligned) def centerOFmass(points): """ Summary: Calculate the center of mass of a polygon. Args: points: a list of points Returns: center: a list of points """ nppoints = np.array(points) sumX = np.sum(nppoints[:, 0]) sumY = np.sum(nppoints[:, 1]) return [int(sumX / len(points)), int(sumY / len(points))] def flattener(list_2d): """ Summary: Flatten a list of QTpoints. Args: list_2d: a list of QTpoints Returns: points: a list of points """ points = [(p.x(), p.y()) for p in list_2d] points = np.array(points, np.int16).flatten().tolist() return points def mapFrameToTime(frameNumber, fps): """ Summary: Map a frame number to its time in the video. Args: frameNumber: the frame number fps: the frame rate of the video Returns: frameHours: the hours of the frame frameMinutes: the minutes of the frame frameSeconds: the seconds of the frame frameMilliseconds: the milliseconds of the frame """ # get the time of the frame frameTime = frameNumber / fps frameHours = int(frameTime / 3600) frameMinutes = int((frameTime - frameHours * 3600) / 60) frameSeconds = int(frameTime - frameHours * 3600 - frameMinutes * 60) frameMilliseconds = int( (frameTime - frameHours * 3600 - frameMinutes * 60 - frameSeconds) * 1000) # print them in formal time format return frameHours, frameMinutes, frameSeconds, frameMilliseconds def class_name_to_id(class_name): """ Summary: Map a class name to its id in the coco dataset. Args: class_name: the class name Returns: class_id: the id of the class """ try: # map from coco_classes(a list of coco class names) to class_id return coco_classes.index(class_name) except: # this means that the class name is not in the coco dataset return -1 def compute_iou(box1, box2): """ Summary: Computes IOU between two bounding boxes. Args: box1 (list): List of 4 coordinates (xmin, ymin, xmax, ymax) of the first box. box2 (list): List of 4 coordinates (xmin, ymin, xmax, ymax) of the second box. Returns: iou (float): IOU between the two boxes. 
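
    Example (illustrative values; two 10x10 boxes overlapping in a 5x5 patch
    give 25 / (100 + 100 - 25)):
        >>> compute_iou([0, 0, 10, 10], [5, 5, 15, 15])
        0.14285714285714285
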
""" # Compute intersection coordinates xmin = max(box1[0], box2[0]) ymin = max(box1[1], box2[1]) xmax = min(box1[2], box2[2]) ymax = min(box1[3], box2[3]) # Compute intersection area if xmin < xmax and ymin < ymax: intersection_area = (xmax - xmin) * (ymax - ymin) else: intersection_area = 0 # Compute union area box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1]) box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1]) union_area = box1_area + box2_area - intersection_area # Compute IOU iou = intersection_area / union_area if union_area > 0 else 0 return iou def compute_iou_exact(shape1, shape2): """ Summary: Computes IOU between two polygons. Args: shape1 (list): List of 2D coordinates(also list) of the first polygon. shape2 (list): List of 2D coordinates(also list) of the second polygon. Returns: iou (float): IOU between the two polygons. """ shape1 = [tuple(x) for x in shape1] shape2 = [tuple(x) for x in shape2] polygon1 = Polygon(shape1) polygon2 = Polygon(shape2) if polygon1.intersects(polygon2) is False: return 0 intersection = polygon1.intersection(polygon2).area union = polygon1.union(polygon2).area iou = intersection / union if union > 0 else 0 return iou def match_detections_with_tracks(detections, tracks, iou_threshold=0.5): """ Summary: Match detections with tracks based on their bounding boxes using IOU threshold. Args: detections (list): List of detections, each detection is a dictionary with keys (bbox, confidence, class_id) tracks (list): List of tracks, each track is a tuple of (bboxes, track_id, class, conf) iou_threshold (float): IOU threshold for matching detections with tracks. Returns: matched_detections (list): List of detections that are matched with tracks, each detection is a dictionary with keys (bbox, confidence, class_id) unmatched_detections (list): List of detections that are not matched with any tracks, each detection is a dictionary with keys (bbox, confidence, class_id) """ matched_detections = [] unmatched_detections = [] # Loop through each detection for detection in detections: detection_bbox = detection['bbox'] # Loop through each track max_iou = 0 matched_track = None for track in tracks: track_bbox = track[0:4] # Compute IOU between detection and track iou = compute_iou(detection_bbox, track_bbox) # Check if IOU is greater than threshold and better than previous matches if iou > iou_threshold and iou > max_iou: matched_track = track max_iou = iou # If a track was matched, add detection to matched_detections list and remove the matched track from tracks list if matched_track is not None: detection['group_id'] = int(matched_track[4]) matched_detections.append(detection) tracks.remove(matched_track) else: unmatched_detections.append(detection) return matched_detections, unmatched_detections def get_boxes_conf_classids_segments(shapes): """ Summary: Get bounding boxes, confidences, class ids, and segments from shapes (NOT QT). 
Args: shapes: a list of shapes Returns: boxes: a list of bounding boxes confidences: a list of confidences class_ids: a list of class ids segments: a list of segments """ boxes = [] confidences = [] class_ids = [] segments = [] for s in shapes: label = s["label"] points = s["points"] # points are one dimensional array of x1,y1,x2,y2,x3,y3,x4,y4 # we will convert it to a 2 dimensional array of points (segment) segment = [] for j in range(0, len(points), 2): segment.append([int(points[j]), int(points[j + 1])]) # if points is empty pass # if len(points) == 0: # continue segments.append(segment) boxes.append(get_bbox_xyxy(segment)) confidences.append(float(s["content"])) class_ids.append(coco_classes.index( label)if label in coco_classes else -1) return boxes, confidences, class_ids, segments def convert_qt_shapes_to_shapes(qt_shapes): """ Summary: Convert QT shapes to shapes. Args: qt_shapes: a list of QT shapes Returns: shapes: a list of shapes """ shapes = [] for s in qt_shapes: shapes.append(dict( label=s.label.encode("utf-8") if PY2 else s.label, # convert points into 1D array points=flattener(s.points), bbox=get_bbox_xyxy([(p.x(), p.y()) for p in s.points]), group_id=s.group_id, content=s.content, shape_type=s.shape_type, flags=s.flags, )) return shapes def convert_shapes_to_qt_shapes(shapes): qt_shapes = [] for shape in shapes: label = shape["label"] points = shape["points"] bbox = shape["bbox"] shape_type = shape["shape_type"] # flags = shape["flags"] content = shape["content"] group_id = shape["group_id"] # other_data = shape["other_data"] if not points: # skip point-empty shape continue shape = Shape( label=label, shape_type=shape_type, group_id=group_id, content=content, ) for i in range(0, len(points), 2): shape.addPoint(QtCore.QPointF(points[i], points[i + 1])) shape.close() qt_shapes.append(shape) return qt_shapes def convert_QT_to_cv(incomingImage): """ Summary: Convert QT image to cv image MAT format. Args: incomingImage: a QT image Returns: arr: a cv image MAT format """ incomingImage = incomingImage.convertToFormat(QtGui.QImage.Format.Format_ARGB32) width = incomingImage.width() height = incomingImage.height() ptr = incomingImage.bits() ptr.setsize(incomingImage.sizeInBytes()) arr = np.array(ptr).reshape(height, width, 4) # Copies the data return arr def convert_cv_to_qt(cv_img): """ Summary: Convert cv image to QT image format. Args: cv_img: a cv image Returns: convert_to_Qt_format: a QT image format """ rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB) h, w, ch = rgb_image.shape bytes_per_line = ch * w convert_to_Qt_format = QtGui.QImage( rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format.Format_RGB888) return convert_to_Qt_format def SAM_rects_to_boxes(rects): """ Summary: Convert a list of QT rectangles to a list of bounding boxes. Args: rects: a list of QT rectangles Returns: res: a list of bounding boxes """ res = [] for rect in rects: listPOINTS = [min(rect[0].x(), rect[1].x()), min(rect[0].y(), rect[1].y()), max(rect[0].x(), rect[1].x()), max(rect[0].y(), rect[1].y())] listPOINTS = [int(round(x)) for x in listPOINTS] res.append(listPOINTS) if len(res) == 0: res = None return res def SAM_points_and_labels_from_coordinates(coordinates): """ Summary: Convert a list of coordinates to a list of points and a list of labels. 
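
    Example (illustrative; each coordinate is (x, y, label), where SAM uses
    label 1 for foreground clicks and 0 for background clicks):
        >>> pts, lbls = SAM_points_and_labels_from_coordinates(
        ...     [[10.4, 20.6, 1], [30.0, 40.0, 0]])
        >>> pts.tolist(), lbls.tolist()
        ([[10, 21], [30, 40]], [1, 0])
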
Args: coordinates: a list of coordinates Returns: input_points: a list of points input_labels: a list of labels """ input_points = [] input_labels = [] for coordinate in coordinates: input_points.append( [int(round(coordinate[0])), int(round(coordinate[1]))]) input_labels.append(coordinate[2]) if len(input_points) == 0: input_points = None input_labels = None else: input_points = np.array(input_points) input_labels = np.array(input_labels) return input_points, input_labels def load_objects_from_json__json(json_file_name, nTotalFrames): """ Summary: Load objects from a json file using json library. Args: json_file_name: the name of the json file nTotalFrames: the total number of frames Returns: listObj: a list of objects (each object is a dictionary of a frame with keys (frame_idx, frame_data)) """ listObj = [{'frame_idx': i + 1, 'frame_data': []} for i in range(nTotalFrames)] if not os.path.exists(json_file_name): with open(json_file_name, 'w') as jf: json.dump(listObj, jf, indent=4, separators=(',', ': ')) jf.close() with open(json_file_name, 'r') as jf: listObj = json.load(jf) jf.close() return listObj def load_objects_to_json__json(json_file_name, listObj): """ Summary: Load objects to a json file using json library. Args: json_file_name: the name of the json file listObj: a list of objects (each object is a dictionary of a frame with keys (frame_idx, frame_data)) Returns: None """ with open(json_file_name, 'w') as json_file: json.dump(listObj, json_file, indent=4, separators=(',', ': ')) json_file.close() def load_objects_from_json__orjson(json_file_name, nTotalFrames): """ Summary: Load objects from a json file using orjson library. Args: json_file_name: the name of the json file nTotalFrames: the total number of frames Returns: listObj: a list of objects (each object is a dictionary of a frame with keys (frame_idx, frame_data)) """ listObj = [{'frame_idx': i + 1, 'frame_data': []} for i in range(nTotalFrames)] if not os.path.exists(json_file_name): with open(json_file_name, "wb") as jf: jf.write(orjson.dumps(listObj)) jf.close() with open(json_file_name, "rb") as jf: listObj = orjson.loads(jf.read()) jf.close() return listObj def load_objects_to_json__orjson(json_file_name, listObj): """ Summary: Load objects to a json file using orjson library. Args: json_file_name: the name of the json file listObj: a list of objects (each object is a dictionary of a frame with keys (frame_idx, frame_data)) Returns: None """ with open(json_file_name, "wb") as jf: jf.write(orjson.dumps(listObj, option=orjson.OPT_INDENT_2)) jf.close() def scaleQTshape(self, originalshape, center, ratioX, ratioY): """ Summary: Scale a QT shape live in the canvas. according to a center point and two ratios. Args: self: the main window object to access the canvas originalshape: the original shape center: the center point ratioX: the ratio of the x axis ratioY: the ratio of the y axis Returns: None """ ratioX = ratioX / 100 ratioY = ratioY / 100 shape = self.canvas.selectedShapes[0] self.canvas.shapes.remove(shape) self.canvas.selectedShapes.remove(shape) self.remLabels([shape]) for i in range(len(shape.points)): shape.points[i].setX( (originalshape.points[i].x() - center[0]) * ratioX + center[0]) shape.points[i].setY( (originalshape.points[i].y() - center[1]) * ratioY + center[1]) self.canvas.shapes.append(shape) self.canvas.selectedShapes.append(shape) self.addLabel(shape) def is_id_repeated(self, group_id, frameIdex=-1): """ Summary: Check if a group id is repeated in the current frame or in all frames. 
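
    Example (illustrative sketch; `self` must be the main window with a loaded
    video, so the calls are shown without being executed):
        >>> # self.is_id_repeated(7)                # search the current frame
        >>> # self.is_id_repeated(7, frameIdex=12)  # search frame 12 instead
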
Args: self: the main window object to access the canvas group_id: the group id frameIdex: the frame index (-1 means the current frame) Returns: True if the group id is repeated, False otherwise """ if frameIdex == -1: frameIdex = self.INDEX_OF_CURRENT_FRAME listObj = self.load_objects_from_json__orjson() for object_ in listObj[frameIdex - 1]['frame_data']: if object_['tracker_id'] == group_id: return True return False def checkKeyFrames(ids, keyFrames): """ Summary: Check if all the ids have at least two key frames. Args: ids: a list of ids keyFrames: a dictionary of key frames Returns: allAccepted: True if all the ids have at least two key frames, False otherwise idsToTrack: a list of ids that have at least two key frames """ idsToTrack = [] allAccepted = True for id in ids: try: if len(keyFrames['id_' + str(id)]) == 1: allAccepted = False else: idsToTrack.append(id) except: allAccepted = False allRejected = len(idsToTrack) == 0 return allAccepted, allRejected, idsToTrack def getInterpolated(baseObject, baseObjectFrame, nextObject, nextObjectFrame, curFrame): """ Summary: Interpolate a shape between two frames using linear interpolation. Args: baseObject: the base object baseObjectFrame: the base object frame nextObject: the next object nextObjectFrame: the next object frame curFrame: the frame to interpolate Returns: cur: the interpolated shape """ prvR = (nextObjectFrame - curFrame) / (nextObjectFrame - baseObjectFrame) nxtR = (curFrame - baseObjectFrame) / (nextObjectFrame - baseObjectFrame) cur_bbox = prvR * np.array(baseObject['bbox']) + nxtR * np.array(nextObject['bbox']) cur_bbox = [int(cur_bbox[i]) for i in range(len(cur_bbox))] (baseObject['segment'], nextObject['segment']) = handleTwoSegments( baseObject['segment'], nextObject['segment']) cur_segment = prvR * np.array(baseObject['segment']) + nxtR * np.array(nextObject['segment']) cur_segment = [[int(sublist[0]), int(sublist[1])] for sublist in cur_segment] cur = copy.deepcopy(baseObject) cur['bbox'] = cur_bbox cur['segment'] = cur_segment return cur def update_saved_models_json(cwd): """ Summary: Update the saved models json file. """ checkpoints_dir = cwd + "/mmdetection/checkpoints/" # list all the files in the checkpoints directory try: files = os.listdir(checkpoints_dir) except: # if checkpoints directory does not exist, create it os.mkdir(checkpoints_dir) with open(cwd + '/models_menu/models_json.json') as f: models_json = json.load(f) saved_models = {} # saved_models["YOLOv8x"] = {"checkpoint": "yolov8x-seg.pt", "config": "none"} for model in models_json: if model["Model"] != "SAM": if model["Checkpoint"].split("/")[-1] in os.listdir(checkpoints_dir): saved_models[model["Model Name"]] = { "id": model["id"], "checkpoint": model["Checkpoint"], "config": model["Config"]} with open(cwd + "/saved_models.json", "w") as f: json.dump(saved_models, f, indent=4) def delete_id_from_rec_and_traj(id, id_frames_rec, trajectories, frames): """ Summary: Delete an id from id_frames_rec and trajectories. 
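
    Example (illustrative; frames 2 and 3 of id 5 are cleared, and the matching
    trajectory slots become the (-1, -1) sentinel):
        >>> rec = {'id_5': {1, 2, 3}}
        >>> traj = {'id_5': [(10, 10), (20, 20), (30, 30)]}
        >>> rec, traj = delete_id_from_rec_and_traj(5, rec, traj, [2, 3])
        >>> sorted(rec['id_5']), traj['id_5']
        ([1], [(10, 10), (-1, -1), (-1, -1)])
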
Args: id: the id to delete id_frames_rec: a dictionary of id frames records Returns: """ # remove frames from id_frames_rec for this id id_frames_rec['id_' + str(id)] = id_frames_rec['id_' + str(id)] - set(frames) # remove frames from trajectories for this id for frame in frames: trajectories['id_' + str(id)][frame - 1] = (-1, -1) return id_frames_rec, trajectories def adjust_shapes_to_original_image(shapes, x1, y1, area_points): shape1 = [tuple([int(x[0]), int(x[1])]) for x in area_points] polygon1 = Polygon(shape1) final = [] for shape in shapes: shape['points'] = [shape['points'][i] + x1 if i % 2 == 0 else shape['points'][i] + y1 for i in range(len(shape['points']))] shape['bbox'] = [shape['bbox'][0] + x1, shape['bbox'][1] + y1, shape['bbox'][2] + x1, shape['bbox'][3] + y1] points = shape["points"] shape2 = [tuple([int(points[z]), int(points[z + 1])]) for z in range(0, len(points), 2)] polygon2 = Polygon(shape2) if polygon1.intersects(polygon2): final.append(shape) return final def track_area_adjustedBboex(area_points, dims, ratio = 0.1): [x1, y1, x2, y2] = get_bbox_xyxy(area_points) [w, h] = [x2 - x1, y2 - y1] x1 = int(max(0, x1 - w * ratio)) y1 = int(max(0, y1 - h * ratio)) x2 = int(min(dims[1], x2 + w * ratio)) y2 = int(min(dims[0], y2 + h * ratio)) return [x1, y1, x2, y2] def get_contour_length(contour): contour_start = contour contour_end = np.r_[contour[1:], contour[0:1]] return np.linalg.norm(contour_end - contour_start, axis=1).sum() def mask_to_polygons(mask, n_points=25, resize_factors=[1.0, 1.0]): mask = mask > 0.0 contours = skimage.measure.find_contours(mask) if len(contours) == 0: return [] contour = max(contours, key=get_contour_length) coords = skimage.measure.approximate_polygon( coords=contour, tolerance=np.ptp(contour, axis=0).max() / 100, ) coords = coords * resize_factors # convert coords from x y to y x coords = np.fliplr(coords) # segment_points are a list of coords segment_points = coords.astype(int) polygon = segment_points return polygon def polygon_to_shape(polygon, score, className="SAM instance"): shape = {} shape["label"] = className shape["content"] = str(round(score, 2)) shape["group_id"] = None shape["shape_type"] = "polygon" shape["bbox"] = get_bbox_xyxy(polygon) shape["flags"] = {} shape["other_data"] = {} # shape_points is result["seg"] flattened shape["points"] = [item for sublist in polygon for item in sublist] # print(shape) return shape def OURnms_confidenceBased(shapes, iou_threshold=0.5): """ Perform non-maximum suppression on a list of shapes based on their bounding boxes using IOU threshold. Args: shapes (list): List of shapes, each shape is a dictionary with keys (bbox, confidence, class_id) iou_threshold (float): IOU threshold for non-maximum suppression. 
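
    Example (illustrative; the lower-confidence of two heavily overlapping
    squares is suppressed at a 0.5 threshold):
        >>> kept, *_ = OURnms_confidenceBased(
        ...     [{"label": "person", "points": [0, 0, 10, 0, 10, 10, 0, 10], "content": 0.9},
        ...      {"label": "person", "points": [1, 1, 11, 1, 11, 11, 1, 11], "content": 0.8}],
        ...     iou_threshold=0.5)
        >>> len(kept)
        1
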
    Returns:
        list: List of shapes after performing non-maximum suppression, each
        shape is a dictionary with keys (bbox, confidence, class_id)
    """
    iou_threshold = float(iou_threshold)
    for shape in shapes:
        if shape['content'] is None:
            shape['content'] = 1.0
    # Sort shapes by their confidence
    shapes.sort(key=lambda x: x['content'], reverse=True)
    boxes, confidences, class_ids, segments = get_boxes_conf_classids_segments(
        shapes)
    toBeRemoved = []
    # Loop through each shape
    for i in range(len(shapes)):
        shape_bbox = boxes[i]
        # Loop through each remaining shape
        for j in range(i + 1, len(shapes)):
            remaining_shape_bbox = boxes[j]
            # Compute IOU between shape and remaining_shape
            iou = compute_iou(shape_bbox, remaining_shape_bbox)
            # If IOU is greater than threshold, mark remaining_shape for removal
            if iou > iou_threshold:
                toBeRemoved.append(j)
    shapesFinal = []
    boxesFinal = []
    confidencesFinal = []
    class_idsFinal = []
    segmentsFinal = []
    for i in range(len(shapes)):
        if i in toBeRemoved:
            continue
        shapesFinal.append(shapes[i])
    boxesFinal, confidencesFinal, class_idsFinal, segmentsFinal = get_boxes_conf_classids_segments(
        shapesFinal)
    return shapesFinal, boxesFinal, confidencesFinal, class_idsFinal, segmentsFinal


def OURnms_areaBased_fromSAM(sam_result, iou_threshold=0.5):
    iou_threshold = float(iou_threshold)
    # Sort masks by their areas (largest first)
    sortedResult = sorted(sam_result, key=lambda x: x['area'], reverse=True)
    masks = [mask['segmentation'] for mask in sortedResult]
    scores = [mask['stability_score'] for mask in sortedResult]
    polygons = [mask_to_polygons(mask) for mask in masks]
    toBeRemoved = []
    # Loop through each shape (this dedup pass only runs for thresholds above 0.99)
    if iou_threshold > 0.99:
        for i in range(len(polygons)):
            shape1 = polygons[i]
            # Loop through each remaining shape
            for j in range(i + 1, len(sortedResult)):
                shape2 = polygons[j]
                # Compute IOU between shape and remaining_shape
                iou = compute_iou_exact(shape1, shape2)
                # If IOU is greater than threshold, mark remaining_shape for removal
                if iou > iou_threshold:
                    toBeRemoved.append(j)
    shapes = []
    for i in range(len(polygons)):
        if i in toBeRemoved:
            continue
        shapes.append(polygon_to_shape(polygons[i], scores[i], f'X{i}'))
    return shapes


================================================
FILE: DLTA_AI_app/labelme/utils/helpers/visualizations.py
================================================
import numpy as np
import cv2
from .mathOps import *

coco_classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
                'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
                'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
                'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
                'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
                'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
                'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
                'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
                'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
                'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
                'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
                'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
                'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
                'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']

# make a color palette so that bounding boxes of different classes are drawn
# in different colors; the palette should contain the 12 well-known colors:
# red, green, blue, yellow, cyan, magenta, white, black, gray, brown, pink,
# and orange (BGR format)


def draw_bb_id(flags, image, x, y, w, h, id, conf, label, color=(0, 0, 255), thickness=1):
    """
    Summary:
        Draw bounding box and id on an image (Single id).

    Args:
        flags: a dictionary of flags (bbox, id, class, conf)
        image: a cv2 image
        x: x coordinate of the bounding box
        y: y coordinate of the bounding box
        w: width of the bounding box
        h: height of the bounding box
        id: id of the shape
        conf: confidence of the shape
        label: label of the shape (class name)
        color: color of the bounding box
        thickness: thickness of the bounding box

    Returns:
        image: a cv2 image
    """
    if image is None:
        print("Image is None")
        return
    if flags['bbox']:
        image = cv2.rectangle(
            image, (x, y), (x + w, y + h), color, thickness + 1)
    if flags['id'] or flags['class'] or flags['conf']:
        text = ''
        if flags['id'] and flags['class']:
            text = f'#{id} [{label}]'
        if flags['id'] and not flags['class']:
            text = f'#{id}'
        if not flags['id'] and flags['class']:
            text = f'[{label}]'
        if flags['conf']:
            text = f'{text} {conf}' if len(text) > 0 else f'{conf}'
        # fontscale is proportional to the image size
        fontscale = image.shape[0] / 2000
        if fontscale < 0.3:
            fontscale = 0.3
        elif fontscale > 5:
            fontscale = 5
        text_width, text_height = cv2.getTextSize(
            text, cv2.FONT_HERSHEY_SIMPLEX, fontscale, thickness)[0]
        text_x = x + 10
        text_y = y - 10
        text_background_x1 = x
        text_background_y1 = y - 2 * 10 - text_height
        text_background_x2 = x + 2 * 10 + text_width
        text_background_y2 = y
        cv2.rectangle(
            img=image,
            pt1=(text_background_x1, text_background_y1),
            pt2=(text_background_x2, text_background_y2),
            color=color,
            thickness=cv2.FILLED,
        )
        cv2.putText(
            img=image,
            text=text,
            org=(text_x, text_y),
            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
            fontScale=fontscale,
            color=(0, 0, 0),
            thickness=thickness,
            lineType=cv2.LINE_AA,
        )
    # there is no bbox but there is id or class
    if (not flags['bbox']) and (flags['id'] or flags['class'] or flags['conf']):
        image = cv2.line(image, (x + int(w / 2), y + int(h / 2)),
                         (x + 50, y - 5), color, thickness + 1)
    return image


def draw_trajectories(trajectories, CurrentFrameIndex, flags, img, shapes):
    """
    Summary:
        Draw trajectories on an image.
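
    Example (illustrative sketch; the dictionary layout below is inferred from
    how this module reads it, so treat the keys and values as assumptions):
        >>> # trajectories = {'length': 30, 'alpha': 0.5,
        >>> #                 'id_3': [(120, 80), (125, 82)],
        >>> #                 'id_color_3': (0, 0, 255)}
        >>> # img = draw_trajectories(trajectories, CurrentFrameIndex=2,
        >>> #                         flags={'traj': True, 'mask': False},
        >>> #                         img=img, shapes=shapes)
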
Args: trajectories: a dictionary of trajectories CurrentFrameIndex: the current frame index flags: a dictionary of flags (traj, mask) img: a cv2 image shapes: a list of shapes Returns: img: a cv2 image """ x = trajectories['length'] for shape in shapes: id = shape["group_id"] pts_traj = trajectories['id_' + str(id)][max( CurrentFrameIndex - x, 0): CurrentFrameIndex] pts_poly = np.array([[x, y] for x, y in zip( shape["points"][0::2], shape["points"][1::2])]) color_poly = trajectories['id_color_' + str( id)] if flags['mask']: original_img = img.copy() if pts_poly is not None: cv2.fillPoly(img, pts=[pts_poly], color=color_poly) alpha = trajectories['alpha'] img = cv2.addWeighted(original_img, alpha, img, 1 - alpha, 0) for i in range(len(pts_traj) - 1, 0, - 1): thickness = (len(pts_traj) - i <= 10) * 1 + (len(pts_traj) - i <= 20) * 1 + (len(pts_traj) - i <= 30) * 1 + 3 # max_thickness = 6 # thickness = max(1, round(i / len(pts_traj) * max_thickness)) if pts_traj[i - 1] is None or pts_traj[i] is None: continue if pts_traj[i] == (-1, - 1) or pts_traj[i - 1] == (-1, - 1): break # color_traj = tuple(int(0.95 * x) for x in color_poly) color_traj = color_poly if flags['traj']: cv2.line(img, pts_traj[i - 1], pts_traj[i], color_traj, thickness) if ((len(pts_traj) - 1 - i) % 10 == 0): cv2.circle(img, pts_traj[i], 3, (0, 0, 0), -1) return img def draw_bb_on_image(trajectories, CurrentFrameIndex, flags, nTotalFrames, image, shapes, image_qt_flag=True): """ Summary: Draw bounding boxes and trajectories on an image (multiple ids). Args: trajectories: a dictionary of trajectories. CurrentFrameIndex: the current frame index. nTotalFrames: the total number of frames. image: a QT image or a cv2 image. shapes: a list of shapes. image_qt_flag: a flag to indicate if the image is a QT image or a cv2 image. Returns: img: a QT image or a cv2 image. """ img = image if image_qt_flag: img = convert_QT_to_cv(image) for shape in shapes: id = shape["group_id"] label = shape["label"] conf = shape["content"] # color calculation # idx = coco_classes.index(label) if label in coco_classes else -1 # idx = idx % len(color_palette) # color = color_palette[idx] if idx != -1 else (0, 0, 255) # label_hash = hash(label) # idx = abs(label_hash) % len(color_palette) label_ascii = sum([ord(c) for c in label]) idx = label_ascii % len(color_palette) color = color_palette[idx] (x1, y1, x2, y2) = shape["bbox"] x, y, w, h = int(x1), int(y1), int(x2 - x1), int(y2 - y1) img = draw_bb_id(flags, img, x, y, w, h, id, conf, label, color, thickness=1) center = (int((x1 + x2) / 2), int((y1 + y2) / 2)) try: centers_rec = trajectories['id_' + str(id)] try: (xp, yp) = centers_rec[CurrentFrameIndex - 2] (xn, yn) = center if (xp == -1 or xn == -1): c = 5 / 0 r = 0.5 x = r * xn + (1 - r) * xp y = r * yn + (1 - r) * yp center = (int(x), int(y)) except: pass centers_rec[CurrentFrameIndex - 1] = center trajectories['id_' + str(id)] = centers_rec trajectories['id_color_' + str(id)] = color except: centers_rec = [(-1, - 1)] * int(nTotalFrames) centers_rec[CurrentFrameIndex - 1] = center trajectories['id_' + str(id)] = centers_rec trajectories['id_color_' + str(id)] = color # print(sys.getsizeof(trajectories)) img = draw_trajectories(trajectories, CurrentFrameIndex, flags, img, shapes) if image_qt_flag: img = convert_cv_to_qt(img, ) return img def draw_bb_on_image_MODE(flags, image, shapes): """ Summary: Draw bounding boxes on an QT image (multiple ids) in MODE image. Args: flags: a dictionary of flags. image: a QT image. shapes: a list of shapes. 
    Returns:
        img: a QT image.
    """
    img = convert_QT_to_cv(image)
    for shape in shapes:
        label = shape["label"]
        if label == "SAM instance":
            continue
        conf = shape["content"]
        pts_poly = np.array([[x, y] for x, y in zip(
            shape["points"][0::2], shape["points"][1::2])])
        # color calculation
        # idx = coco_classes.index(label) if label in coco_classes else -1
        # idx = idx % len(color_palette)
        # color = color_palette[idx] if idx != -1 else (0, 0, 255)
        # label_hash = hash(label)
        # idx = abs(label_hash) % len(color_palette)
        label_ascii = sum([ord(c) for c in label])
        idx = label_ascii % len(color_palette)
        color = color_palette[idx]
        (x1, y1, x2, y2) = shape["bbox"]
        x, y, w, h = int(x1), int(y1), int(x2 - x1), int(y2 - y1)
        img = draw_bb_label_on_image_MODE(
            flags, img, x, y, w, h, label, conf, color, thickness=1)
        if flags['mask']:
            original_img = img.copy()
            if pts_poly is not None:
                cv2.fillPoly(img, pts=[pts_poly], color=color)
            alpha = 0.70
            img = cv2.addWeighted(original_img, alpha, img, 1 - alpha, 0)
    img = convert_cv_to_qt(img, )
    return img


def draw_bb_label_on_image_MODE(flags, image, x, y, w, h, label, conf, color=(0, 0, 255), thickness=1):
    """
    Summary:
        Draw bounding box and label on an image (Single shape).

    Args:
        flags: a dictionary of flags (bbox, conf, class)
        image: a cv2 image
        x: x coordinate of the bounding box
        y: y coordinate of the bounding box
        w: width of the bounding box
        h: height of the bounding box
        label: label of the shape (class name)
        conf: confidence of the shape
        color: color of the bounding box
        thickness: thickness of the bounding box

    Returns:
        image: a cv2 image
    """
    if image is None:
        print("Image is None")
        return
    if flags['bbox']:
        image = cv2.rectangle(
            image, (x, y), (x + w, y + h), color, thickness + 1)
    if flags['conf'] or flags['class']:
        if flags['conf'] and flags['class']:
            text = f'[{label}] {conf}'
        if flags['conf'] and not flags['class']:
            text = f'{conf}'
        if not flags['conf'] and flags['class']:
            text = f'[{label}]'
        # fontscale is proportional to the image size
        fontscale = image.shape[0] / 2000
        if fontscale < 0.3:
            fontscale = 0.3
        elif fontscale > 5:
            fontscale = 5
        text_width, text_height = cv2.getTextSize(
            text, cv2.FONT_HERSHEY_SIMPLEX, fontscale, thickness)[0]
        text_x = x + 10
        text_y = y - 10
        text_background_x1 = x
        text_background_y1 = y - 2 * 10 - text_height
        text_background_x2 = x + 2 * 10 + text_width
        text_background_y2 = y
        cv2.rectangle(
            img=image,
            pt1=(text_background_x1, text_background_y1),
            pt2=(text_background_x2, text_background_y2),
            color=color,
            thickness=cv2.FILLED,
        )
        cv2.putText(
            img=image,
            text=text,
            org=(text_x, text_y),
            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
            fontScale=fontscale,
            color=(0, 0, 0),
            thickness=thickness,
            lineType=cv2.LINE_AA,
        )
    # there is no bbox but there is conf or class
    if (not flags['bbox']) and (flags['conf'] or flags['class']):
        image = cv2.line(image, (x + int(w / 2), y + int(h / 2)),
                         (x + 50, y - 5), color, thickness + 1)
    return image


================================================
FILE: DLTA_AI_app/labelme/utils/image.py
================================================
import base64
import io

import numpy as np
import PIL.ExifTags
import PIL.Image
import PIL.ImageOps


def img_data_to_pil(img_data):
    f = io.BytesIO()
    f.write(img_data)
    img_pil = PIL.Image.open(f)
    return img_pil


def img_data_to_arr(img_data):
    img_pil = img_data_to_pil(img_data)
    img_arr = np.array(img_pil)
    return img_arr


def img_b64_to_arr(img_b64):
    img_data = base64.b64decode(img_b64)
    img_arr = img_data_to_arr(img_data)
    return img_arr


def img_pil_to_data(img_pil):
    f = io.BytesIO()
    img_pil.save(f, format="PNG")
    img_data = f.getvalue()
return img_data def img_arr_to_b64(img_arr): img_pil = PIL.Image.fromarray(img_arr) f = io.BytesIO() img_pil.save(f, format="PNG") img_bin = f.getvalue() if hasattr(base64, "encodebytes"): img_b64 = base64.encodebytes(img_bin) else: img_b64 = base64.encodestring(img_bin) return img_b64 def img_data_to_png_data(img_data): with io.BytesIO() as f: f.write(img_data) img = PIL.Image.open(f) with io.BytesIO() as f: img.save(f, "PNG") f.seek(0) return f.read() def apply_exif_orientation(image): try: exif = image._getexif() except AttributeError: exif = None if exif is None: return image exif = { PIL.ExifTags.TAGS[k]: v for k, v in exif.items() if k in PIL.ExifTags.TAGS } orientation = exif.get("Orientation", None) if orientation == 1: # do nothing return image elif orientation == 2: # left-to-right mirror return PIL.ImageOps.mirror(image) elif orientation == 3: # rotate 180 return image.transpose(PIL.Image.ROTATE_180) elif orientation == 4: # top-to-bottom mirror return PIL.ImageOps.flip(image) elif orientation == 5: # top-to-left mirror return PIL.ImageOps.mirror(image.transpose(PIL.Image.ROTATE_270)) elif orientation == 6: # rotate 270 return image.transpose(PIL.Image.ROTATE_270) elif orientation == 7: # top-to-right mirror return PIL.ImageOps.mirror(image.transpose(PIL.Image.ROTATE_90)) elif orientation == 8: # rotate 90 return image.transpose(PIL.Image.ROTATE_90) else: return image ================================================ FILE: DLTA_AI_app/labelme/utils/model_explorer.py ================================================ from PyQt6 import QtWidgets, QtCore, QtGui from PyQt6.QtWidgets import QDialog, QToolBar, QTableWidget, QTableWidgetItem, QVBoxLayout, QHBoxLayout, QComboBox, QCheckBox, QPushButton, QProgressDialog, QApplication, QWidget import json import urllib.request import requests import os import time # store json file into list of dictionaries cwd = os.getcwd() with open(cwd + '/models_menu/models_json.json') as f: models_json = json.load(f) class ModelExplorerDialog(QDialog): """ A dialog window for exploring available models and downloading them. Attributes: main_window (QMainWindow): The main window of the application. mute (bool): Whether to mute notifications or not. notification (function): A function for displaying notifications. """ def __init__(self, main_window=None, mute=None, notification=None): """ Initializes the ModelExplorerDialog. Args: main_window (QMainWindow): The main window of the application. mute (bool): Whether to mute notifications. notification (function): A function for displaying notifications. 
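
        Example (illustrative sketch; typically launched from the main window,
        and `self.show_notification` is a hypothetical callback name, so the
        calls are shown unexecuted):
            >>> # dlg = ModelExplorerDialog(main_window=self, mute=False,
            >>> #                          notification=self.show_notification)
            >>> # if dlg.exec():
            >>> #     name, config, checkpoint = dlg.selected_model
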
""" super().__init__() self.main_window = main_window self.mute = mute self.notification = notification self.setWindowTitle("Model Explorer") self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowType.WindowContextHelpButtonHint) # Define the columns of the table self.cols_labels = ["id", "Model Name", "Backbone", "Lr schd", "Memory (GB)", "Inference Time (fps)", "box AP", "mask AP", "Checkpoint Size (MB)"] # Get the unique model names self.model_keys = sorted( list(set([model['Model'] for model in models_json]))) # Set up the layout layout = QVBoxLayout() self.setLayout(layout) # Set up the toolbar toolbar = QToolBar() layout.addWidget(toolbar) # Set up the model type dropdown menu self.model_type_dropdown = QComboBox() self.model_type_dropdown.addItems(["All"] + self.model_keys) self.model_type_dropdown.currentIndexChanged.connect(self.search) toolbar.addWidget(self.model_type_dropdown) # Set up the checkboxes self.available_checkbox = QCheckBox("Downloaded") self.available_checkbox.clicked.connect(self.search) toolbar.addWidget(self.available_checkbox) self.not_available_checkbox = QCheckBox("Not Downloaded") self.not_available_checkbox.clicked.connect(self.search) toolbar.addWidget(self.not_available_checkbox) # Set up the search button # search_button = QPushButton("Search") # search_button.clicked.connect(self.search) # toolbar.addWidget(search_button) # Set up the button for opening the checkpoints directory open_checkpoints_dir_button = QPushButton("Open Checkpoints Dir") # add icon to the button open_checkpoints_dir_button.setIcon( QtGui.QIcon(cwd + '/labelme/icons/downloads.png')) open_checkpoints_dir_button.setIconSize(QtCore.QSize(20, 20)) open_checkpoints_dir_button.clicked.connect( self.open_checkpoints_dir) toolbar.addWidget(open_checkpoints_dir_button) # Set spacing layout.setSpacing(10) # Set up the table self.table = QTableWidget() layout.addWidget(self.table) # Set up the number of rows and columns self.num_rows = len(models_json) self.num_cols = 9 # Make availability list self.check_availability() # Populate the table with default data self.populate_table() # Set up the submit and cancel buttons button_layout = QHBoxLayout() layout.addLayout(button_layout) close_button = QPushButton("Ok") close_button.clicked.connect(self.close) # add side padding to the button close_button.setFixedWidth(100) # make the button in the middle of the layout, don't stretch button_layout.addStretch() button_layout.addWidget(close_button) button_layout.addStretch() # layout spacing layout.setSpacing(10) def populate_table(self): """ Populates the table with data from models_json. 
Returns: None """ # Clear the table (keep the header labels) self.table.clearContents() self.table.setRowCount(self.num_rows) # +2 for the available cell and select row button self.table.setColumnCount(self.num_cols + 2) # Set the header labels header = self.table.horizontalHeader() self.table.setHorizontalHeaderLabels( self.cols_labels + ["Status", "Select Model"]) header.setSectionResizeMode(QtWidgets.QHeaderView.ResizeMode.ResizeToContents) # remove vertical header self.table.verticalHeader().setVisible(False) self.table.horizontalHeader().setSectionResizeMode( QtWidgets.QHeaderView.ResizeMode.ResizeToContents) # Populate the table with data row_count = 0 for model in models_json: col_count = 0 for key in self.cols_labels: item = QTableWidgetItem(f"{model[key]}") item.setTextAlignment(QtCore.Qt.AlignmentFlag.AlignCenter) self.table.setItem(row_count, col_count, item) col_count += 1 # Select Model column self.selected_model = (-1, -1, -1) select_row_button = QPushButton("Select Model") select_row_button.clicked.connect(self.select_model) self.table.setContentsMargins(10, 10, 10, 10) self.table.setCellWidget(row_count, 10, select_row_button) # Downloaded column if model["Downloaded"]: available_item = QTableWidgetItem("Downloaded") # make the text color dark green available_item.setForeground(QtCore.Qt.GlobalColor.darkGreen) self.table.setItem(row_count, 9, available_item) available_item.setTextAlignment(QtCore.Qt.AlignmentFlag.AlignCenter) else: available_item = QPushButton("Requires Download") available_item.clicked.connect( self.create_download_callback(model["id"])) # add padding to button available_item.setContentsMargins(10, 10, 10, 10) # maek the button text color red available_item.setStyleSheet("color: red") self.table.setCellWidget(row_count, 9, available_item) # make select_row_button disabled select_row_button.setEnabled(False) # Disable SAM Selection if model["Model"] == "SAM": select_row_button.setEnabled(False) # change text select_row_button.setText("Select from SAM Toolbar") row_count += 1 def search(self): """ Filters the table based on the selected model type and availability. Returns: None """ # Get the selected model type and availability model_type = self.model_type_dropdown.currentText() available = self.available_checkbox.isChecked() not_available = self.not_available_checkbox.isChecked() # Iterate over each row in the table for row in range(self.num_rows): show_row = True # Filter by model type if model_type != "All": id = int(self.table.item(row, 0).text()) if models_json[id]["Model"] != model_type: show_row = False # Filter by availability if available or not_available: available_text = self.table.item(row, 9) try: available_text = available_text.text() except AttributeError: pass if available and available_text != "Downloaded": show_row = False if not_available and available_text == "Downloaded": show_row = False # Hide or show the row based on the filters self.table.setRowHidden(row, not show_row) def select_model(self): """ Gets the selected model from the table and sets it as the selected model. 
        Returns:
            None
        """
        # Get the button that was clicked
        sender = self.sender()
        # Get the row index of the button in the table
        index = self.table.indexAt(sender.pos())
        # Get the model id from the row index
        row = index.row()
        model_id = int(self.table.item(row, 0).text())
        # Set the selected model as the model with this id
        self.selected_model = models_json[model_id]["Model Name"], models_json[model_id]["Config"], models_json[model_id]["Checkpoint"],
        self.accept()

    def download_model(self, id):
        """
        Downloads the model with the given id and updates the progress dialog.

        Args:
            id (int): The id of the model to download.

        Returns:
            None
        """
        # Get the checkpoint link and model name for the model with this id
        checkpoint_link = models_json[id]["Checkpoint_link"]
        model_name = models_json[id]["Model Name"]
        # Create a progress dialog
        self.progress_dialog = QProgressDialog(
            f"Downloading {model_name}...", "Cancel", 0, 100, self)
        # Set the window title
        self.progress_dialog.setWindowTitle("Downloading Model")
        self.progress_dialog.setWindowModality(QtCore.Qt.WindowModality.WindowModal)
        self.progress_dialog.canceled.connect(self.cancel_download)
        self.progress_dialog.show()
        # Initialize variables for tracking download progress
        self.start_time = time.time()
        self.last_time = self.start_time
        self.last_downloaded = 0
        self.download_canceled = False

        def handle_progress(block_num, block_size, total_size):
            """
            Updates the progress dialog with the current download progress.

            Args:
                block_num (int): The number of blocks downloaded.
                block_size (int): The size of each block.
                total_size (int): The total size of the file being downloaded.

            Returns:
                None
            """
            # Calculate the download progress
            read_data = block_num * block_size
            if total_size > 0:
                download_percentage = read_data * 100 / total_size
                # QProgressDialog.setValue expects an int
                self.progress_dialog.setValue(int(download_percentage))
                self.progress_dialog.setLabelText(f"Downloading {model_name}... ")
            QApplication.processEvents()

        failed = False
        # Define the target path before the try block so cleanup can reference it safely
        file_path = f"{cwd}/mmdetection/checkpoints/{checkpoint_link.split('/')[-1]}"
        try:
            # Download the file using requests
            response = requests.get(checkpoint_link, stream=True)
            total_size = int(response.headers.get('content-length', 0))
            block_size = 1024
            block_num = 0
            # Save the downloaded file to disk
            with open(file_path, 'wb') as f:
                for data in response.iter_content(block_size):
                    if self.download_canceled:
                        break
                    f.write(data)
                    block_num += 1
                    handle_progress(block_num, block_size, total_size)
            if self.download_canceled:
                # Delete the file if the download was canceled
                os.remove(file_path)
                print("Download canceled by user")
                failed = True
        except Exception as e:
            # Remove any partial file before reporting the failure
            if os.path.exists(file_path):
                os.remove(file_path)
            print(f"Download error: {e}")
            failed = True
        # Close the progress dialog and update the table
        self.progress_dialog.close()
        self.check_availability()
        self.populate_table()
        print("Download finished")
        # Show a notification if the main window is not active
        try:
            if not self.mute:
                if not self.isActiveWindow():
                    if not failed:
                        self.notification(f"{model_name} has been downloaded successfully")
                    else:
                        self.notification(f"Failed to download {model_name}")
        except:
            pass

    def cancel_download(self):
        """
        Sets the download_canceled flag to True to cancel the download.

        Returns:
            None
        """
        self.download_canceled = True

    def create_download_callback(self, model_id):
        """
        Returns a lambda function that downloads the model with the given id.

        Args:
            model_id (int): The id of the model to download.

        Returns:
            function: A lambda function that downloads the model with the given id.
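
        Example (illustrative; a factory like this lets each button capture its
        own id, avoiding Python's late-binding closure pitfall, shown here with
        the equivalent default-argument idiom):
            >>> callbacks = [(lambda i=i: i) for i in range(3)]
            >>> [cb() for cb in callbacks]
            [0, 1, 2]
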
""" return lambda: self.download_model(model_id) def check_availability(self): """ Checks the availability of each model in the table and updates the "Downloaded" column. Returns: None """ checkpoints_dir = cwd + "/mmdetection/checkpoints/" for model in models_json: if model["Checkpoint"].split("/")[-1] in os.listdir(checkpoints_dir): model["Downloaded"] = True else: model["Downloaded"] = False def open_checkpoints_dir(self): """ Opens the directory containing the downloaded checkpoints in the file explorer. Returns: None """ url = QtCore.QUrl.fromLocalFile(cwd + "/mmdetection/checkpoints/") if not QtGui.QDesktopServices.openUrl(url): # Print an error message if opening failed print("Failed to open checkpoints directory") ================================================ FILE: DLTA_AI_app/labelme/utils/qt.py ================================================ from math import sqrt import os.path as osp import numpy as np from PyQt6 import QtCore from PyQt6 import QtGui from PyQt6 import QtWidgets here = osp.dirname(osp.abspath(__file__)) def newIcon(icon): icons_dir = osp.join(here, "../icons") return QtGui.QIcon(osp.join(":/", icons_dir, "%s.png" % icon)) def newButton(text, icon=None, slot=None): b = QtWidgets.QPushButton(text) if icon is not None: b.setIcon(newIcon(icon)) if slot is not None: b.clicked.connect(slot) return b def newAction( parent, text, slot=None, shortcut=None, icon=None, tip=None, checkable=False, enabled=True, checked=False, ): """Create a new action and assign callbacks, shortcuts, etc.""" a = QtGui.QAction(text, parent) if icon is not None: a.setIconText(text.replace(" ", "\n")) a.setIcon(newIcon(icon)) if shortcut is not None: if isinstance(shortcut, (list, tuple)): a.setShortcuts(shortcut) else: a.setShortcut(shortcut) if tip is not None: a.setToolTip(tip) a.setStatusTip(tip) if slot is not None: a.triggered.connect(slot) if checkable: a.setCheckable(True) a.setEnabled(enabled) a.setChecked(checked) return a def addActions(widget, actions): for action in actions: if action is None: widget.addSeparator() elif isinstance(action, QtWidgets.QMenu): widget.addMenu(action) else: widget.addAction(action) def labelValidator(): return QtGui.QRegularExpressionValidator(QtCore.QRegularExpression(r"^[^ \t].+")) class struct(object): def __init__(self, **kwargs): self.__dict__.update(kwargs) def distance(p): return sqrt(p.x() * p.x() + p.y() * p.y()) def distancetoline(point, line): p1, p2 = line p1 = np.array([p1.x(), p1.y()]) p2 = np.array([p2.x(), p2.y()]) p3 = np.array([point.x(), point.y()]) if np.dot((p3 - p1), (p2 - p1)) < 0: return np.linalg.norm(p3 - p1) if np.dot((p3 - p2), (p1 - p2)) < 0: return np.linalg.norm(p3 - p2) if np.linalg.norm(p2 - p1) == 0: return 0 return np.linalg.norm(np.cross(p2 - p1, p1 - p3)) / np.linalg.norm(p2 - p1) def fmtShortcut(text): mod, key = text.split("+", 1) return "%s+%s" % (mod, key) ================================================ FILE: DLTA_AI_app/labelme/utils/sam.py ================================================ from segment_anything import sam_model_registry, SamPredictor, SamAutomaticMaskGenerator import numpy as np import torch from .helpers import mathOps # create a sam predictor class with funcions to predict and visualize and results class Sam_Predictor(): def __init__(self, model_type, checkpoint_path, device): self.model_type = model_type self.checkpoint_path = checkpoint_path self.device = device self.model = sam_model_registry[model_type](checkpoint=checkpoint_path) self.model.to(device = self.device) self.predictor = 
SamPredictor(self.model) self.image = None self.mask_logit = None def set_new_image(self, image): self.image = image self.predictor.set_image(image) def clear_logit(self): self.mask_logit = None def predict(self, point_coords=None, point_labels=None, box=None, multimask_output=True, image=None): # print(point_coords , point_labels) # print(f'----------------------- into SAM predict') # print(f'point_coords: {point_coords}, point_labels: {point_labels}, box: {box}') if box is None: # print(f'----------------------- no boxes') if self.mask_logit is None: masks, scores, logits = self.predictor.predict(point_coords=point_coords, point_labels=point_labels, multimask_output=multimask_output) else: masks, scores, logits = self.predictor.predict(point_coords=point_coords, point_labels=point_labels, mask_input=self.mask_logit[None, :, :], multimask_output=multimask_output) else: # print(f'----------------------- boxes') if len(box) == 1: # print(f'----------------------- only one box') input_box = np.array(box[0]) masks, scores, logits = self.predictor.predict(point_coords=point_coords, point_labels=point_labels, box=input_box[None, :], multimask_output=multimask_output) else: # print(f'----------------------- multiple boxes') input_box = np.array(box[0]) box_tensor = torch.tensor(box, device=self.predictor.device) box_transformed = self.predictor.transform.apply_boxes_torch(box_tensor, image.shape[:2]) masks, scores, logits = self.predictor.predict_torch(point_coords=None, point_labels=None, boxes=box_transformed, multimask_output=False) if multimask_output: if box is not None and len(box) != 1: logits = torch.Tensor.cpu(logits).numpy().reshape(-1, logits.shape[-2], logits.shape[-1]) masks = torch.Tensor.cpu(masks).numpy().reshape(-1, masks.shape[-2], masks.shape[-1]) scores = torch.Tensor.cpu(scores).numpy().reshape(-1) self.mask_logit = logits[np.argmax(scores), :, :] # Choose the model's best mask logit mask = masks[np.argmax(scores), :, :] # Choose the model's best mask score = np.max(scores) # Choose the model's best score return mask, score def predict_batch(self, boxes=None, image=None): boxes = np.array(boxes) input_boxes = torch.tensor(boxes, device=self.predictor.device) transformed_boxes = self.predictor.transform.apply_boxes_torch(input_boxes, image.shape[:2]) masks, scores, logits = self.predictor.predict_torch( point_coords=None, point_labels=None, boxes=transformed_boxes, multimask_output=False, ) return masks, scores def check_image(self , new_image): if not np.array_equal(self.image, new_image): # print("image changed_1") self.mask_logit = None self.image = new_image self.predictor.set_image(new_image) # print("image changed_2") return False return True def get_all_shapes(self, image, iou_threshold): # self.mask_generator = SamAutomaticMaskGenerator( # model: Sam, # points_per_side: Optional[int] = 32, # points_per_batch: int = 64, # pred_iou_thresh: float = 0.88, # stability_score_thresh: float = 0.95, # stability_score_offset: float = 1.0, # box_nms_thresh: float = 0.7, # crop_n_layers: int = 0, # crop_nms_thresh: float = 0.7, # crop_overlap_ratio: float = 512 / 1500, # crop_n_points_downscale_factor: int = 1, # point_grids: Optional[List[np.ndarray]] = None, # min_mask_region_area: int = 0, # output_mode: str = "binary_mask", # ) self.mask_generator = SamAutomaticMaskGenerator( model = self.model, # points_per_side = 32, # points_per_batch = 64, # pred_iou_thresh = 0.88, # stability_score_thresh = 0.95, # stability_score_offset = 1.0, # box_nms_thresh = 0.3, # crop_n_layers = 
0, # crop_nms_thresh = 0.7, # crop_overlap_ratio = 512 / 1500, # crop_n_points_downscale_factor = 1, # point_grids = None, # min_mask_region_area = image.shape[0] * image.shape[1] * 0.0005, # output_mode = "binary_mask", ) # Arguments( # model (Sam): The SAM model to use for mask prediction. # points_per_side (int or None): The number of points to be sampled # along one side of the image. The total number of points is # points_per_side**2. If None, 'point_grids' must provide explicit # point sampling. # points_per_batch (int): Sets the number of points run simultaneously # by the model. Higher numbers may be faster but use more GPU memory. # pred_iou_thresh (float): A filtering threshold in [0,1], using the # model's predicted mask quality. # stability_score_thresh (float): A filtering threshold in [0,1], using # the stability of the mask under changes to the cutoff used to binarize # the model's mask predictions. # stability_score_offset (float): The amount to shift the cutoff when # calculated the stability score. # box_nms_thresh (float): The box IoU cutoff used by non-maximal # suppression to filter duplicate masks. # crop_n_layers (int): If >0, mask prediction will be run again on # crops of the image. Sets the number of layers to run, where each # layer has 2**i_layer number of image crops. # crop_nms_thresh (float): The box IoU cutoff used by non-maximal # suppression to filter duplicate masks between different crops. # crop_overlap_ratio (float): Sets the degree to which crops overlap. # In the first crop layer, crops will overlap by this fraction of # the image length. Later layers with more crops scale down this overlap. # crop_n_points_downscale_factor (int): The number of points-per-side # sampled in layer n is scaled down by crop_n_points_downscale_factor**n. # point_grids (list(np.ndarray) or None): A list over explicit grids # of points used for sampling, normalized to [0,1]. The nth grid in the # list is used in the nth crop layer. Exclusive with points_per_side. # min_mask_region_area (int): If >0, postprocessing will be applied # to remove disconnected regions and holes in masks with area smaller # than min_mask_region_area. Requires opencv. # output_mode (str): The form masks are returned in. Can be 'binary_mask', # 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. # For large resolutions, 'binary_mask' may consume large amounts of # memory. 
# ) # sam_result is a list of dictionaries # each dictionary (mask) has the following keys: # segmentation - [np.ndarray] - the mask with (W, H) shape, and bool type # area - [int] - the area of the mask in pixels # bbox - [List[int]] - the boundary box of the mask in xywh format # predicted_iou - [float] - the model's own prediction for the quality of the mask # point_coords - [List[List[float]]] - the sampled input point that generated this mask # stability_score - [float] - an additional measure of mask quality # crop_box - List[int] - the crop of the image used to generate this mask in xywh format sam_result = self.mask_generator.generate(image) shapes = mathOps.OURnms_areaBased_fromSAM(sam_result, iou_threshold=iou_threshold) # with AREA not score return shapes ================================================ FILE: DLTA_AI_app/labelme/utils/shape.py ================================================ import math import uuid import numpy as np import PIL.Image import PIL.ImageDraw from labelme.logger import logger def polygons_to_mask(img_shape, polygons, shape_type=None): logger.warning( "The 'polygons_to_mask' function is deprecated, " "use 'shape_to_mask' instead." ) return shape_to_mask(img_shape, points=polygons, shape_type=shape_type) def shape_to_mask( img_shape, points, shape_type=None, line_width=10, point_size=5 ): mask = np.zeros(img_shape[:2], dtype=np.uint8) mask = PIL.Image.fromarray(mask) draw = PIL.ImageDraw.Draw(mask) xy = [tuple(point) for point in points] if shape_type == "circle": assert len(xy) == 2, "Shape of shape_type=circle must have 2 points" (cx, cy), (px, py) = xy d = math.sqrt((cx - px) ** 2 + (cy - py) ** 2) draw.ellipse([cx - d, cy - d, cx + d, cy + d], outline=1, fill=1) elif shape_type == "rectangle": assert len(xy) == 2, "Shape of shape_type=rectangle must have 2 points" draw.rectangle(xy, outline=1, fill=1) elif shape_type == "line": assert len(xy) == 2, "Shape of shape_type=line must have 2 points" draw.line(xy=xy, fill=1, width=line_width) elif shape_type == "linestrip": draw.line(xy=xy, fill=1, width=line_width) elif shape_type == "point": assert len(xy) == 1, "Shape of shape_type=point must have 1 points" cx, cy = xy[0] r = point_size draw.ellipse([cx - r, cy - r, cx + r, cy + r], outline=1, fill=1) else: assert len(xy) > 2, "Polygon must have points more than 2" draw.polygon(xy=xy, outline=1, fill=1) mask = np.array(mask, dtype=bool) return mask def shapes_to_label(img_shape, shapes, label_name_to_value): cls = np.zeros(img_shape[:2], dtype=np.int32) ins = np.zeros_like(cls) instances = [] for shape in shapes: points = shape["points"] label = shape["label"] group_id = shape.get("group_id") if group_id is None: group_id = uuid.uuid1() shape_type = shape.get("shape_type", None) cls_name = label instance = (cls_name, group_id) if instance not in instances: instances.append(instance) ins_id = instances.index(instance) + 1 cls_id = label_name_to_value[cls_name] mask = shape_to_mask(img_shape[:2], points, shape_type) cls[mask] = cls_id ins[mask] = ins_id return cls, ins def labelme_shapes_to_label(img_shape, shapes): logger.warn( "labelme_shapes_to_label is deprecated, so please use " "shapes_to_label." 
) label_name_to_value = {"_background_": 0} for shape in shapes: label_name = shape["label"] if label_name in label_name_to_value: label_value = label_name_to_value[label_name] else: label_value = len(label_name_to_value) label_name_to_value[label_name] = label_value lbl, _ = shapes_to_label(img_shape, shapes, label_name_to_value) return lbl, label_name_to_value def masks_to_bboxes(masks): if masks.ndim != 3: raise ValueError( "masks.ndim must be 3, but it is {}".format(masks.ndim) ) if masks.dtype != bool: raise ValueError( "masks.dtype must be bool type, but it is {}".format(masks.dtype) ) bboxes = [] for mask in masks: where = np.argwhere(mask) (y1, x1), (y2, x2) = where.min(0), where.max(0) + 1 bboxes.append((y1, x1, y2, x2)) bboxes = np.asarray(bboxes, dtype=np.float32) return bboxes ================================================ FILE: DLTA_AI_app/labelme/utils/vid_to_frames.py ================================================ import os import sys import cv2 from PyQt6.QtWidgets import QApplication, QWidget, QLabel, QPushButton, QFileDialog, QSlider, QLineEdit, QVBoxLayout, QHBoxLayout, QDialog, QProgressBar from PyQt6.QtCore import Qt from PyQt6.QtGui import QFont from PyQt6 import QtWidgets import qdarktheme class VideoFrameExtractor(QDialog): def __init__(self, mute = None, notification = None): super().__init__() self.mute = mute self.notification = notification # set minimum window size self.setMinimumSize(500, 300) self.setWindowTitle("Open Video as Frames") self.setWindowFlags(self.windowFlags() & ~Qt.WindowType.WindowContextHelpButtonHint) self.sampling_max = 100 # Initialize variables self.vid_path = None self.sampling_rate = 1 self.start_frame = 1 self.end_frame = None self.fps = None self.stop = False self.path_name = None font = QFont() font.setBold(True) # Create widgets self.file_label = QLabel("Select a video file:") self.file_button = QPushButton("Open Video") self.file_button.clicked.connect(self.select_file) self.sampling_label = QLabel("Sampling rate:") self.sampling_slider = QSlider() self.sampling_slider.setOrientation(Qt.Orientation.Horizontal) self.sampling_slider.setRange(1, self.sampling_max) self.sampling_slider.setValue(1) self.sampling_slider.setEnabled(False) self.sampling_slider.valueChanged.connect(self.update_sampling_rate) self.sampling_edit = QLineEdit(str(self.sampling_slider.value())) self.sampling_edit.setFont(QFont('Arial', 10)) self.sampling_edit.setAlignment(Qt.AlignmentFlag.AlignCenter) self.sampling_edit.setEnabled(False) self.sampling_edit.textChanged.connect(self.update_sampling_slider) self.sampling_time_label = QLabel("hh:mm:ss") self.sampling_time_label.setFont(font) self.sampling_time_label.setAlignment(Qt.AlignmentFlag.AlignRight) self.start_label = QLabel("Start frame:") self.start_slider = QSlider() self.start_slider.setOrientation(Qt.Orientation.Horizontal) self.start_slider.setRange(0, 1000) self.start_slider.setValue(0) self.start_slider.setEnabled(False) self.start_slider.valueChanged.connect(self.update_start_frame) self.start_edit = QLineEdit(str(self.start_slider.value())) self.start_edit.setFont(QFont('Arial', 10)) self.start_edit.setAlignment(Qt.AlignmentFlag.AlignCenter) self.start_edit.setEnabled(False) self.start_edit.textChanged.connect(self.update_start_slider) self.start_time_label = QLabel("hh:mm:ss") self.start_time_label.setFont(font) self.start_time_label.setAlignment(Qt.AlignmentFlag.AlignRight) self.end_label = QLabel("End frame:") self.end_slider = QSlider() 
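# (The end-frame controls below mirror the start-frame pair above: a slider and an edit box kept in sync through their valueChanged/textChanged handlers.)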
self.end_slider.setOrientation(Qt.Orientation.Horizontal) self.end_slider.setRange(0, 1) self.end_slider.setValue(1) self.end_slider.setEnabled(False) self.end_slider.valueChanged.connect(self.update_end_frame) self.end_edit = QLineEdit(str(self.end_slider.value())) self.end_edit.setFont(QFont('Arial', 10)) self.end_edit.setAlignment(Qt.AlignmentFlag.AlignCenter) self.end_edit.setEnabled(False) self.end_edit.textChanged.connect(self.update_end_slider) self.end_time_label = QLabel("hh:mm:ss") self.end_time_label.setFont(font) self.end_time_label.setAlignment(Qt.AlignmentFlag.AlignRight) self.extract_button = QPushButton("Extract Frames") self.extract_button.clicked.connect(self.extract_frames) self.extract_button.setEnabled(False) self.stop_button = QPushButton("Stop") self.stop_button.pressed.connect(self.stop_extraction) self.stop_button.setEnabled(False) self.progress_bar = QProgressBar(self) self.progress_bar.setGeometry(50, 150, 300, 20) self.progress_bar.setFormat("Waiting for extraction...") self.progress_bar.setValue(0) # Create layouts file_layout = QHBoxLayout() file_layout.addWidget(self.file_label) file_layout.addWidget(self.file_button) sampling_layout = QHBoxLayout() inner_sampling_layout = QVBoxLayout() inner_sampling_layout.addWidget(self.sampling_label) inner_sampling_layout.addWidget(self.sampling_time_label) sampling_layout.addLayout(inner_sampling_layout) inner_sampling_layout = QVBoxLayout() inner_sampling_layout.addWidget(self.sampling_edit) inner_sampling_layout.addWidget(self.sampling_slider) sampling_layout.addLayout(inner_sampling_layout) range_layout = QHBoxLayout() start_layout = QHBoxLayout() inner_start_layout = QVBoxLayout() inner_start_layout.addWidget(self.start_label, alignment=Qt.AlignmentFlag.AlignLeft) inner_start_layout.addWidget(self.start_time_label, alignment=Qt.AlignmentFlag.AlignLeft) start_layout.addLayout(inner_start_layout) inner_start_layout = QVBoxLayout() inner_start_layout.addWidget(self.start_edit) inner_start_layout.addWidget(self.start_slider) start_layout.addLayout(inner_start_layout) end_layout = QHBoxLayout() inner_end_layout = QVBoxLayout() inner_end_layout.addWidget(self.end_label) inner_end_layout.addWidget(self.end_time_label) end_layout.addLayout(inner_end_layout) inner_end_layout = QVBoxLayout() inner_end_layout.addWidget(self.end_edit) inner_end_layout.addWidget(self.end_slider) end_layout.addLayout(inner_end_layout) range_layout.addLayout(start_layout) end_layout.setContentsMargins(20, 0, 0, 0) range_layout.addLayout(end_layout) button_layout = QHBoxLayout() button_layout.addWidget(self.extract_button) button_layout.addWidget(self.stop_button) main_layout = QVBoxLayout() main_layout.addLayout(file_layout) range_layout.setContentsMargins(0, 20, 0, 0) main_layout.addLayout(range_layout) main_layout.addLayout(sampling_layout) main_layout.addLayout(button_layout) main_layout.addWidget(self.progress_bar) # Set the main layout self.setLayout(main_layout) def select_file(self): # Open a file dialog to select a video file file_path, _ = QFileDialog.getOpenFileName(self, "Video to Frames", "", "Video Files (*.mp4 *.avi *.mov)") if file_path: self.vid_path = file_path self.file_label.setText(f"Selected video file: {self.vid_path}") self.sampling_slider.setEnabled(True) self.sampling_edit.setEnabled(True) self.start_slider.setEnabled(True) self.start_edit.setEnabled(True) self.end_slider.setEnabled(True) self.end_edit.setEnabled(True) self.extract_button.setEnabled(True) self.stop_button.setEnabled(True) # Set the stop button to red 
self.stop_button.setStyleSheet("background-color: red; color: white;") # Open the video file vidcap = cv2.VideoCapture(self.vid_path) self.fps = vidcap.get(cv2.CAP_PROP_FPS) # Set the maximum value of the start and end sliders to the total number of frames in the video self.max_frame = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT)) # Set the start and end sliders to the maximum value self.start_slider.setMaximum(self.max_frame) self.start_time_label.setText(self.get_time_string(0)) # update startedit and start time self.end_slider.setMaximum(self.max_frame) self.end_slider.setValue(self.max_frame) # update endedit and end time self.end_edit.setText(str(self.end_slider.value())) self.end_time_label.setText(self.get_time_string(self.max_frame / self.fps)) # update sampling self.sampling_time_label.setText(self.get_time_string(1 / self.fps)) self.sampling_slider.setMaximum(self.max_frame // 10) self.sampling_slider.setValue(self.max_frame // 100) self.sampling_max = self.max_frame // 10 else: self.file_label.setText("No video is selected") self.sampling_slider.setEnabled(False) self.sampling_edit.setEnabled(False) self.start_slider.setEnabled(False) self.start_edit.setEnabled(False) self.end_slider.setEnabled(False) self.end_edit.setEnabled(False) def update_sampling_rate(self, value): # Update the sampling rate when the slider is moved self.sampling_rate = value self.sampling_edit.setText(str(value)) def update_sampling_slider(self, text): # Update the sampling rate when the edit box is changed try: value = int(text) if value < 1: value = 1 elif value > self.sampling_max: value = self.sampling_max self.sampling_rate = value self.sampling_slider.setValue(value) if self.fps: self.sampling_time_label.setText(self.get_time_string(value / self.fps)) if self.end_frame is not None: self.progress_bar.setFormat(f"Will Extract {(self.end_frame - self.start_frame) // self.sampling_rate} Frames") except ValueError: pass def update_start_frame(self, value): # Update the start frame when the slider is moved self.start_frame = value self.start_edit.setText(str(value)) def update_start_slider(self, text): # Update the start frame when the edit box is changed try: value = int(text) if value < 0: value = 0 elif self.end_frame is not None and value > self.end_frame: self.start_slider.setValue(self.end_frame) value = self.end_frame self.start_frame = value self.start_slider.setValue(value) if self.fps: self.start_time_label.setText(self.get_time_string(value / self.fps)) if self.end_frame is not None: self.progress_bar.setFormat(f"Will Extract {(self.end_frame - self.start_frame) // self.sampling_rate} Frames") except ValueError: pass def update_end_frame(self, value): # Update the end frame when the slider is moved self.end_frame = value self.end_edit.setText(str(value)) def update_end_slider(self, text): # Update the end frame when the edit box is changed try: value = int(text) if self.start_frame is not None and value < self.start_frame: value = self.start_frame self.end_frame = value self.end_slider.setValue(value) if self.fps: self.end_time_label.setText(self.get_time_string(value / self.fps)) if self.end_frame is not None: self.progress_bar.setFormat(f"Will Extract {(self.end_frame - self.start_frame) // self.sampling_rate} Frames") except ValueError: pass def extract_frames(self): # Call the vid_to_frames function with the selected parameters try: self.path_name = self.vid_to_frames(self.vid_path, self.sampling_rate, self.start_frame, self.end_frame) except ValueError as e: self.progress_bar.setFormat(str(e)) 
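# (If extraction fails, the error text is left in the progress bar and we return early, so the dialog stays open instead of closing below.)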
return self.close() return self.path_name def stop_extraction(self): # stop the extraction process self.stop = True def get_time_string(self, seconds, separator=":"): # Convert seconds to hh:mm:ss format m, s = divmod(seconds, 60) h, m = divmod(m, 60) return f"{int(h):02d}{separator}{int(m):02d}{separator}{int(s):02d}" def vid_to_frames(self, vid_path, sampling_rate, start_frame, end_frame): """ Extracts frames from a video file and saves them as JPEG images. Args: vid_path (str): Path to the video file. sampling_rate (int): How often to save a frame. For example, if sampling_rate = 2, every other frame will be saved. start_frame (int): Starting frame number. end_frame (int): Ending frame number. """ # Check if the path exists if not os.path.exists(vid_path): raise ValueError("Video path does not exist") # Create a directory to store the frames frames_path = "".join([vid_path.split(".")[0], "_frames"]) # if the directory does not exist, create it if not os.path.exists(frames_path): os.mkdir(frames_path) # if the directory exists, delete all the files it contains else: for file in os.listdir(frames_path): os.remove(os.path.join(frames_path, file)) # Open the video file vidcap = cv2.VideoCapture(vid_path) # if the video file does not exist, raise an error # Set the starting frame vidcap.set(cv2.CAP_PROP_POS_FRAMES, start_frame) # Get the total number of frames in the video n_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT)) print(f"Total number of frames: {n_frames}") # Initialize counters count = start_frame success = True # set progress bar Format while success: success, image = vidcap.read() if count % sampling_rate == 0: # Get the time in the video corresponding to the current frame time_in_sec = count / self.fps time_str = self.get_time_string(time_in_sec, separator="_") # Save the image with the time in the file name indented_count = str(count).zfill(len(str(n_frames))) cv2.imwrite(f"{frames_path}/frame_{indented_count}_time_{time_str}.jpg", image) self.progress_bar.setValue(int(((count - start_frame) / (end_frame - start_frame)) * 100)) self.progress_bar.setFormat(f"{int(((count - start_frame) / (end_frame - start_frame)) * 100)}%") count += 1 if count >= end_frame: self.progress_bar.setValue(100) break QtWidgets.QApplication.processEvents() if self.stop: self.stop = False self.progress_bar.setFormat("Extraction stopped") self.progress_bar.setValue(0) break # Show a notification if the model explorer is not the active window try: if not self.mute: if not self.isActiveWindow(): self.notification(f"Video Extraction Completed") except: pass return frames_path # if __name__ == "__main__": # app = QApplication(sys.argv) # qdarktheme.setup_theme() # window = VideoFrameExtractor() # window.show() # sys.exit(app.exec()) ================================================ FILE: DLTA_AI_app/labelme/widgets/ClassesWidget.py ================================================ from PyQt6 import QtCore from PyQt6 import QtGui from PyQt6 import QtWidgets # add ClassWidget and allow the user to select among coco classes using a combobox class Classeswidget(QtWidgets.QDialog): def __init__(self): super(Classeswidget, self).__init__() self.setModal(True) self.setWindowTitle("Select Class") self.class_name = "person" self.class_name = self._createQComboBox() def _createQComboBox(self): class_name = QtWidgets.QComboBox() class_name.addItems(["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", 
"cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]) class_name.currentIndexChanged.connect(self.onNewValue) return class_name def onNewValue(self, value): self.class_name = value def getValue(self): return self.class_name def setValue(self, value): self.class_name = value def exec(self): super(Classeswidget, self).exec() return self.class_name ================================================ FILE: DLTA_AI_app/labelme/widgets/MsgBox.py ================================================ from PyQt6 import QtWidgets def OKmsgBox(title, text, type = "info", turnResult = False): """ Show a message box. Args: title (str): The title of the message box. text (str): The text of the message box. type (str, optional): The type of the message box. Can be "info", "warning", or "critical". Defaults to "info". Returns: int: The result of the message box. This will be the value of the button clicked by the user. """ msgBox = QtWidgets.QMessageBox() if type == "info": msgBox.setIcon(QtWidgets.QMessageBox.Icon.Information) elif type == "warning": msgBox.setIcon(QtWidgets.QMessageBox.Warning) elif type == "critical": msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical) msgBox.setText(text) msgBox.setWindowTitle(title) if turnResult: msgBox.setStandardButtons(QtWidgets.QMessageBox.StandardButton.Ok | QtWidgets.QMessageBox.Cancel) msgBox.setDefaultButton(QtWidgets.QMessageBox.StandardButton.Ok) else: msgBox.setStandardButtons(QtWidgets.QMessageBox.StandardButton.Ok) msgBox.exec() return msgBox.result() ================================================ FILE: DLTA_AI_app/labelme/widgets/ThresholdWidget.py ================================================ from PyQt6 import QtCore from PyQt6 import QtGui from PyQt6 import QtWidgets class ThresholdWidget(QtWidgets.QDialog): def __init__(self): super(ThresholdWidget, self).__init__() self.setModal(True) self.setWindowTitle("Enter Threshold") self.threshold = 0.5 self.threshold = self._createQLineEdit() def _createQLineEdit(self): threshold = QtWidgets.QLineEdit() threshold.setRange(0, 1) threshold.setValue(0.5) threshold.valueChanged.connect(self.onNewValue) return threshold ================================================ FILE: DLTA_AI_app/labelme/widgets/__init__.py ================================================ # flake8: noqa from .brightness_contrast_dialog import BrightnessContrastDialog from .canvas import Canvas from .color_dialog import ColorDialog from .label_dialog import LabelDialog from .label_dialog import LabelQLineEdit from .label_list_widget import LabelListWidget from .label_list_widget import LabelListWidgetItem from .tool_bar import ToolBar from .unique_label_qlist_widget import UniqueLabelQListWidget from .zoom_widget import ZoomWidget ================================================ FILE: DLTA_AI_app/labelme/widgets/brightness_contrast_dialog.py ================================================ import PIL.Image import 
PIL.ImageEnhance from PyQt6.QtCore import Qt from PyQt6 import QtGui from PyQt6 import QtWidgets from .. import utils class BrightnessContrastDialog(QtWidgets.QDialog): def __init__(self, img, callback, parent=None): super(BrightnessContrastDialog, self).__init__(parent) self.setModal(True) self.setWindowTitle("Brightness/Contrast") self.slider_brightness = self._create_slider() self.slider_contrast = self._create_slider() formLayout = QtWidgets.QFormLayout() formLayout.addRow(self.tr("Brightness"), self.slider_brightness) formLayout.addRow(self.tr("Contrast"), self.slider_contrast) self.setLayout(formLayout) assert isinstance(img, PIL.Image.Image) self.img = img self.callback = callback def onNewValue(self, value): brightness = self.slider_brightness.value() / 50.0 contrast = self.slider_contrast.value() / 50.0 img = self.img img = PIL.ImageEnhance.Brightness(img).enhance(brightness) img = PIL.ImageEnhance.Contrast(img).enhance(contrast) img_data = utils.img_pil_to_data(img) qimage = QtGui.QImage.fromData(img_data) self.callback(qimage) def _create_slider(self): slider = QtWidgets.QSlider(Qt.Orientation.Horizontal) slider.setRange(0, 150) slider.setValue(50) slider.valueChanged.connect(self.onNewValue) return slider ================================================ FILE: DLTA_AI_app/labelme/widgets/canvas.py ================================================ from PyQt6 import QtCore from PyQt6 import QtGui from PyQt6 import QtWidgets from labelme import QT5 from labelme.shape import Shape import labelme.utils import copy # TODO(unknown): # - [maybe] Find optimal epsilon value. CURSOR_DEFAULT = QtCore.Qt.CursorShape.ArrowCursor CURSOR_POINT = QtCore.Qt.CursorShape.PointingHandCursor CURSOR_DRAW = QtCore.Qt.CursorShape.CrossCursor CURSOR_MOVE = QtCore.Qt.CursorShape.ClosedHandCursor CURSOR_GRAB = QtCore.Qt.CursorShape.OpenHandCursor class Canvas(QtWidgets.QWidget): zoomRequest = QtCore.pyqtSignal(int, QtCore.QPoint) scrollRequest = QtCore.pyqtSignal(int, int) newShape = QtCore.pyqtSignal() selectionChanged = QtCore.pyqtSignal(list) shapeMoved = QtCore.pyqtSignal() drawingPolygon = QtCore.pyqtSignal(bool) edgeSelected = QtCore.pyqtSignal(bool, object) vertexSelected = QtCore.pyqtSignal(bool) # SAM signals pointAdded = QtCore.pyqtSignal() samFinish = QtCore.pyqtSignal() # refresh visualization APPrefresh = QtCore.pyqtSignal(bool) CREATE, EDIT = 0, 1 # polygon only _createMode = "polygon" _fill_drawing = False def __init__(self, *args, **kwargs): self.epsilon = kwargs.pop("epsilon", 10.0) self.double_click = kwargs.pop("double_click", "close") if self.double_click not in [None, "close"]: raise ValueError( "Unexpected value for double_click event: {}".format( self.double_click ) ) self.num_backups = kwargs.pop("num_backups", 10) super(Canvas, self).__init__(*args, **kwargs) # Initialise local state. self.mode = self.EDIT self.shapes = [] # Segment anything (SAM) attributes self.SAM_mode = "" self.SAM_coordinates = [] self.SAM_rect = [] self.SAM_rects = [] self.SAM_painter = QtGui.QPainter() self.SAM_current = None # mouse tracking self.show_cross_line = True # Waiting window self.is_loading = False self.loading_angle = 0 self.loading_text = "Loading..."
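# NOTE: SAM_mode holds the interaction state checked in mousePressEvent below:
# "" for normal polygon drawing, "add point" (foreground click, label 1),
# "remove point" (background click, label 0), and "select rect" (two-corner box).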
# tracking area self.tracking_area = "" self.tracking_area_polygon = [] self.current_annotation_mode = "" self.shapesBackups = [] self.current = None self.selectedShapes = [] # save the selected shapes here self.selectedShapesCopy = [] # self.line represents: # - createMode == 'polygon': edge from last point to current self.line = Shape() self.prevPoint = QtCore.QPoint() self.prevMovePoint = QtCore.QPoint() self.offsets = QtCore.QPoint(), QtCore.QPoint() self.scale = 1.0 self.pixmap = QtGui.QPixmap() self.visible = {} self._hideBackround = False self.hideBackround = False self.hShape = None self.prevhShape = None self.hVertex = None self.prevhVertex = None self.hEdge = None self.prevhEdge = None self.movingShape = False self._painter = QtGui.QPainter() self._cursor = CURSOR_DEFAULT # Menus: # 0: right-click without selection and dragging of shapes # 1: right-click with selection and dragging of shapes self.menus = (QtWidgets.QMenu(), QtWidgets.QMenu()) # Set widget options. self.setMouseTracking(True) self.setFocusPolicy(QtCore.Qt.FocusPolicy.WheelFocus) def fillDrawing(self): return self._fill_drawing def setFillDrawing(self, value): self._fill_drawing = value @property def createMode(self): return self._createMode @createMode.setter def createMode(self, value): if value not in [ "polygon", ]: raise ValueError("Unsupported createMode: %s" % value) self._createMode = value def storeShapes(self): shapesBackup = [] for shape in self.shapes: shapesBackup.append(shape.copy()) if len(self.shapesBackups) > self.num_backups: self.shapesBackups = self.shapesBackups[-self.num_backups - 1:] self.shapesBackups.append(shapesBackup) @property def isShapeRestorable(self): # We save the state AFTER each edit (not before) so for an # edit to be undoable, we expect the CURRENT and the PREVIOUS state # to be in the undo stack. if len(self.shapesBackups) < 2: return False return True def restoreShape(self): # This does _part_ of the job of restoring shapes. # The complete process is also done in app.py::undoShapeEdit # and app.py::loadShapes and our own Canvas::loadShapes function. if not self.isShapeRestorable: return self.shapesBackups.pop() # latest # The application will eventually call Canvas.loadShapes which will # push this right back onto the stack. 
shapesBackup = self.shapesBackups.pop() self.shapes = shapesBackup self.selectedShapes = [] for shape in self.shapes: shape.selected = False self.update() def enterEvent(self, ev): self.overrideCursor(self._cursor) def leaveEvent(self, ev): self.unHighlight() self.restoreCursor() def focusOutEvent(self, ev): self.restoreCursor() def isVisible(self, shape): return self.visible.get(shape, True) def drawing(self): return self.mode == self.CREATE def editing(self): return self.mode == self.EDIT def setEditing(self, value=True): self.mode = self.EDIT if value else self.CREATE if not value: # Create self.unHighlight() self.deSelectShape() def unHighlight(self): if self.hShape: self.hShape.highlightClear() self.update() self.prevhShape = self.hShape self.prevhVertex = self.hVertex self.prevhEdge = self.hEdge self.hShape = self.hVertex = self.hEdge = None def selectedVertex(self): return self.hVertex is not None def set_show_cross_line(self, enabled): """Set cross line visibility""" self.show_cross_line = enabled self.update() def mouseMoveEvent(self, ev): """Update line with last point and current coordinates.""" try: pos = self.transformPos(ev.position()) except AttributeError: return self.prevMovePoint = pos self.repaint() self.restoreCursor() # Polygon drawing. if self.drawing(): self.line.shape_type = self.createMode self.overrideCursor(CURSOR_DRAW) if not self.current: return if self.outOfPixmap(pos): # Don't allow the user to draw outside the pixmap. # Project the point to the pixmap's edges. pos = self.intersectionPoint(self.current[-1], pos) elif ( len(self.current) > 1 and self.createMode == "polygon" and self.closeEnough(pos, self.current[0]) ): # Attract line to starting point and # colorise to alert the user. pos = self.current[0] self.overrideCursor(CURSOR_POINT) self.current.highlightVertex(0, Shape.NEAR_VERTEX) if self.createMode in ["polygon"]: self.line[0] = self.current[-1] self.line[1] = pos self.repaint() self.current.highlightClear() return # Polygon copy moving. if QtCore.Qt.MouseButton.RightButton & ev.buttons(): if self.selectedShapesCopy and self.prevPoint: self.overrideCursor(CURSOR_MOVE) self.boundedMoveShapes(self.selectedShapesCopy, pos) self.repaint() elif self.selectedShapes: self.selectedShapesCopy = [ s.copy() for s in self.selectedShapes ] self.repaint() return # Polygon/Vertex moving. if QtCore.Qt.MouseButton.LeftButton & ev.buttons(): if self.selectedVertex(): self.boundedMoveVertex(pos) self.repaint() self.movingShape = True elif self.selectedShapes and self.prevPoint: self.overrideCursor(CURSOR_MOVE) self.boundedMoveShapes(self.selectedShapes, pos) self.repaint() self.movingShape = True return # Just hovering over the canvas, 2 possibilities: # - Highlight shapes # - Highlight vertex # Update shape/vertex fill and tooltip value accordingly. self.setToolTip(self.tr("Image")) for shape in reversed([s for s in self.shapes if self.isVisible(s)]): # Look for a nearby vertex to highlight. If that fails, # check if we happen to be inside a shape. 
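# Vertex hits take priority over containment, so a point stays grabbable even inside a filled shape; iterating the shapes in reverse gives the most recently drawn shape precedence.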
index = shape.nearestVertex(pos, self.epsilon / self.scale) index_edge = shape.nearestEdge(pos, self.epsilon / self.scale) if index is not None: if self.selectedVertex(): self.hShape.highlightClear() self.prevhVertex = self.hVertex = index self.prevhShape = self.hShape = shape self.prevhEdge = self.hEdge = index_edge shape.highlightVertex(index, shape.MOVE_VERTEX) self.overrideCursor(CURSOR_POINT) self.setToolTip(self.tr("Click & drag to move point")) self.setStatusTip(self.toolTip()) self.update() break elif shape.containsPoint(pos): if self.selectedVertex(): self.hShape.highlightClear() self.prevhVertex = self.hVertex self.hVertex = None self.prevhShape = self.hShape = shape self.prevhEdge = self.hEdge = index_edge self.setToolTip( self.tr("Click & drag to move shape '%s'") % shape.label ) # conf = shape.content (to two decimal places) if shape.group_id != None and self.current_annotation_mode == 'video': self.setToolTip( self.tr(f'ID {str(shape.group_id)} {shape.label} {shape.content}')) else: self.setToolTip(self.tr(f'{shape.label} {shape.content}')) self.setStatusTip(self.toolTip()) self.overrideCursor(CURSOR_GRAB) self.update() break else: # Nothing found, clear highlights, reset state. self.unHighlight() self.edgeSelected.emit(self.hEdge is not None, self.hShape) self.vertexSelected.emit(self.hVertex is not None) def addPointToEdge(self): shape = self.prevhShape index = self.prevhEdge point = self.prevMovePoint if shape is None or index is None or point is None: return shape.insertPoint(index, point) shape.highlightVertex(index, shape.MOVE_VERTEX) self.hShape = shape self.hVertex = index self.hEdge = None self.movingShape = True def removeSelectedPoint(self): shape = self.prevhShape point = self.prevMovePoint if shape is None or point is None: return index = shape.nearestVertex(point, self.epsilon) shape.removePoint(index) self.hShape = shape self.hVertex = None self.hEdge = None self.movingShape = True # Save changes def corrected_pos_into_pixmap(self, pos): x = pos.x() y = pos.y() x = min(self.pixmap.width() , max(0, x)) y = min(self.pixmap.height(), max(0, y)) res = QtCore.QPointF(x, y) return res def mousePressEvent(self, ev): pos = self.transformPos(ev.position()) if ev.button() == QtCore.Qt.MouseButton.LeftButton: if self.drawing() and self.SAM_mode == "": if self.current: # Add point to existing shape. if self.createMode == "polygon": self.current.addPoint(self.line[1]) self.line[0] = self.current[-1] if self.current.isClosed(): self.finalise() elif not self.outOfPixmap(pos): # Create new shape. 
self.current = Shape(shape_type=self.createMode) self.current.addPoint(pos) self.line.points = [pos, pos] self.setHiding() self.drawingPolygon.emit(True) self.update() elif self.SAM_mode == "add point": if not self.outOfPixmap(pos): # add the coordinates and the label (1 = foreground, 0 = background) self.SAM_coordinates.append([pos.x(), pos.y(), 1]) self.pointAdded.emit() elif self.SAM_mode == 'remove point': if not self.outOfPixmap(pos): # add the coordinates and the label (1 = foreground, 0 = background) self.SAM_coordinates.append([pos.x(), pos.y(), 0]) self.pointAdded.emit() elif self.SAM_mode == 'select rect': self.SAM_rect.append(self.corrected_pos_into_pixmap(pos)) if len(self.SAM_rect) == 2: self.SAM_rects = [self.SAM_rect] self.pointAdded.emit() self.SAM_rect = [] elif self.tracking_area == "drawing": corrected_pos = self.corrected_pos_into_pixmap(pos) self.tracking_area_polygon.append([corrected_pos.x(), corrected_pos.y()]) # the other case is editing mode else: group_mode = ev.modifiers() == QtCore.Qt.KeyboardModifier.ControlModifier self.selectShapePoint(pos, multiple_selection_mode=group_mode) self.prevPoint = pos self.repaint() elif ev.button() == QtCore.Qt.MouseButton.RightButton and self.editing(): group_mode = ev.modifiers() == QtCore.Qt.KeyboardModifier.ControlModifier self.selectShapePoint(pos, multiple_selection_mode=group_mode) self.prevPoint = pos self.repaint() def handle_right_click(self, menu): try: enable_edit = menu.actions()[7].text() == "Edit &Label" and menu.actions()[7].isEnabled() if menu.actions()[10].text() == "&Mark as key": menu.actions()[10].setEnabled(enable_edit) if menu.actions()[11].text() == "&Scale": menu.actions()[11].setEnabled(enable_edit) except: pass return menu def mouseReleaseEvent(self, ev): pos = self.transformPos(ev.position()) if ev.button() == QtCore.Qt.MouseButton.RightButton: menu = self.menus[len(self.selectedShapesCopy) > 0] menu = self.handle_right_click(menu) self.restoreCursor() if ( not menu.exec(self.mapToGlobal(ev.pos())) and self.selectedShapesCopy ): # Cancel the move by deleting the shadow copy.
self.selectedShapesCopy = [] self.repaint() elif ev.button() == QtCore.Qt.MouseButton.LeftButton and self.selectedShapes: self.overrideCursor(CURSOR_GRAB) if ( self.editing() and ev.modifiers() == QtCore.Qt.KeyboardModifier.ShiftModifier ): # Add point to line if: left-click + SHIFT on a line segment self.addPointToEdge() elif ev.button() == QtCore.Qt.MouseButton.LeftButton and self.selectedVertex(): if ( self.editing() and ev.modifiers() == QtCore.Qt.KeyboardModifier.ShiftModifier ): # Delete point if: left-click + SHIFT on a point self.removeSelectedPoint() elif ev.button() == QtCore.Qt.MouseButton.LeftButton and len(self.SAM_rect) == 1: if abs(pos.x() - self.SAM_rect[0].x()) + abs(pos.y() - self.SAM_rect[0].y()) > 50: self.SAM_rect.append(self.corrected_pos_into_pixmap(pos)) self.SAM_rects = [self.SAM_rect] self.pointAdded.emit() self.SAM_rect = [] if self.movingShape and self.hShape: index = self.shapes.index(self.hShape) if ( self.shapesBackups[-1][index].points != self.shapes[index].points ): self.storeShapes() self.shapeMoved.emit() self.movingShape = False self.APPrefresh.emit(True) def endMove(self, copy): assert self.selectedShapes and self.selectedShapesCopy assert len(self.selectedShapesCopy) == len(self.selectedShapes) if copy: for i, shape in enumerate(self.selectedShapesCopy): self.shapes.append(shape) self.selectedShapes[i].selected = False self.selectedShapes[i] = shape else: for i, shape in enumerate(self.selectedShapesCopy): self.selectedShapes[i].points = shape.points self.selectedShapesCopy = [] self.repaint() self.storeShapes() return True def hideBackroundShapes(self, value): self.hideBackround = value if self.selectedShapes: # Only hide other shapes if there is a current selection. # Otherwise the user will not be able to select a shape. self.setHiding(True) self.update() def setHiding(self, enable=True): self._hideBackround = self.hideBackround if enable else False def canCloseShape(self): return self.drawing() and self.current and len(self.current) > 2 def mouseDoubleClickEvent(self, ev): # We need at least 4 points here, since the mousePress handler # adds an extra one before this handler is called. if ( self.double_click == "close" and self.canCloseShape() and len(self.current) > 3 ): self.current.popPoint() self.finalise() if self.tracking_area == "drawing": self.tracking_area = "drawn" self.update() def selectShapes(self, shapes): self.setHiding() self.selectionChanged.emit(shapes) self.update() def selectShapePoint(self, point, multiple_selection_mode): """Select the first shape created which contains this point.""" if self.selectedVertex(): # A vertex is marked for selection. 
index, shape = self.hVertex, self.hShape shape.highlightVertex(index, shape.MOVE_VERTEX) else: for shape in reversed(self.shapes): if self.isVisible(shape) and shape.containsPoint(point): self.calculateOffsets(shape, point) self.setHiding() if multiple_selection_mode: if shape not in self.selectedShapes: self.selectionChanged.emit( self.selectedShapes + [shape] ) else: self.selectionChanged.emit([shape]) return self.deSelectShape() def calculateOffsets(self, shape, point): rect = shape.boundingRect() x1 = rect.x() - point.x() y1 = rect.y() - point.y() x2 = (rect.x() + rect.width() - 1) - point.x() y2 = (rect.y() + rect.height() - 1) - point.y() self.offsets = QtCore.QPoint(x1, y1), QtCore.QPoint(x2, y2) def boundedMoveVertex(self, pos): index, shape = self.hVertex, self.hShape point = shape[index] if self.outOfPixmap(pos): pos = self.intersectionPoint(point, pos) # convert pos to QPointF pos = QtCore.QPointF(pos) shape.moveVertexBy(index, pos - point) def boundedMoveShapes(self, shapes, pos): if self.outOfPixmap(pos): return False # No need to move o1 = pos + QtCore.QPointF(self.offsets[0]) if self.outOfPixmap(o1): pos -= QtCore.QPoint(min(0, o1.x()), min(0, o1.y())) o2 = pos + QtCore.QPointF(self.offsets[1]) if self.outOfPixmap(o2): pos += QtCore.QPoint( min(0, self.pixmap.width() - o2.x()), min(0, self.pixmap.height() - o2.y()), ) # XXX: The next line tracks the new position of the cursor # relative to the shape, but also results in making it # a bit "shaky" when nearing the border and allows it to # go outside of the shape's area for some reason. # self.calculateOffsets(self.selectedShapes, pos) dp = pos - self.prevPoint if dp: for shape in shapes: shape.moveBy(dp) self.prevPoint = pos return True return False def deSelectShape(self): if self.selectedShapes: self.setHiding(False) self.selectionChanged.emit([]) self.update() def deleteSelected(self): deleted_shapes = [] if self.selectedShapes: for shape in self.selectedShapes: self.shapes.remove(shape) deleted_shapes.append(shape) self.storeShapes() self.selectedShapes = [] self.update() return deleted_shapes def deleteShape(self, shape): if shape in self.selectedShapes: self.selectedShapes.remove(shape) if shape in self.shapes: self.shapes.remove(shape) self.storeShapes() self.update() def copySelectedShapes(self): if self.selectedShapes: self.selectedShapesCopy = [s.copy() for s in self.selectedShapes] self.boundedShiftShapes(self.selectedShapesCopy) self.endMove(copy=True) return self.selectedShapes def boundedShiftShapes(self, shapes): # Try to move in one direction, and if it fails in another. # Give up if both fail. 
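# (Called from copySelectedShapes above: the pasted copies are nudged by a fixed 2-pixel offset so they do not land exactly on top of the originals.)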
point = shapes[0][0] offset = QtCore.QPoint(2.0, 2.0) self.offsets = QtCore.QPoint(), QtCore.QPoint() self.prevPoint = point if not self.boundedMoveShapes(shapes, point - offset): self.boundedMoveShapes(shapes, point + offset) def paintEvent(self, event): if not self.pixmap and not self.is_loading: return super(Canvas, self).paintEvent(event) p = self._painter p.begin(self) p.setRenderHint(QtGui.QPainter.RenderHint.Antialiasing) p.setRenderHint(QtGui.QPainter.RenderHint.SmoothPixmapTransform) # p.setRenderHint(QtGui.QPainter.HighQualityAntialiasing) p.scale(self.scale, self.scale) p.translate(self.offsetToCenter()) p.drawPixmap(0, 0, self.pixmap) Shape.scale = self.scale # Draw loading/waiting screen if self.is_loading: # Draw a semi-transparent rectangle p.setPen(QtCore.Qt.PenStyle.NoPen) p.setBrush(QtGui.QColor(0, 0, 0, 100)) p.drawRect(self.pixmap.rect()) # Draw a spinning wheel p.setPen(QtGui.QColor(255, 255, 255)) p.setBrush(QtCore.Qt.BrushStyle.NoBrush) p.save() p.translate(self.pixmap.width() / 2, self.pixmap.height() / 2 - 50) p.rotate(self.loading_angle) p.drawEllipse(-20, -20, 40, 40) p.drawLine(0, 0, 0, -20) p.restore() self.loading_angle += 5 if self.loading_angle >= 360: self.loading_angle = 0 # Draw the loading text p.setPen(QtGui.QColor(255, 255, 255)) try: fontsize = self.pixmap.width() / 50 p.setFont(QtGui.QFont("Arial", fontsize)) except: p.setFont(QtGui.QFont("Arial", 20)) p.drawText( self.pixmap.rect(), QtCore.Qt.AlignmentFlag.AlignCenter, self.loading_text, ) p.end() self.update() return for shape in self.shapes: if (shape.selected or not self._hideBackround) and self.isVisible( shape ): shape.fill = shape.selected or shape == self.hShape shape.paint(p) if self.current: self.current.paint(p) self.line.paint(p) if self.selectedShapesCopy: for s in self.selectedShapesCopy: s.paint(p) if ( self.fillDrawing() and self.createMode == "polygon" and self.current is not None and len(self.current.points) >= 2 ): drawing_shape = self.current.copy() drawing_shape.addPoint(self.line[1]) drawing_shape.fill = True drawing_shape.paint(p) # Draw mouse coordinates if self.show_cross_line: pen = QtGui.QPen( QtGui.QColor("#00FF00"), max(1, int(round(2.0 / Shape.scale))), QtCore.Qt.PenStyle.DashLine, ) p.setPen(pen) p.setOpacity(0.5) mouseX = min( self.pixmap.width() ,max(0, self.prevMovePoint.x())) mouseY = min( self.pixmap.height() ,max(0, self.prevMovePoint.y())) p.drawLine( QtCore.QPointF(mouseX, 0), QtCore.QPointF(mouseX, self.pixmap.height()), ) p.drawLine( QtCore.QPointF(0, mouseY), QtCore.QPointF(self.pixmap.width(), mouseY), ) # draw SAM rectangle if len(self.SAM_rect) == 1: pen = QtGui.QPen( QtGui.QColor("#FF0000"), 2 * max(1, int(round(2.0 / Shape.scale))), QtCore.Qt.PenStyle.SolidLine, ) p.setPen(pen) p.setOpacity(0.8) point1 = [self.SAM_rect[0].x(), self.SAM_rect[0].y()] corrected = self.corrected_pos_into_pixmap(self.prevMovePoint) point2 = [corrected.x(), corrected.y()] x1 = min(point1[0], point2[0]) y1 = min(point1[1], point2[1]) w = abs(point1[0] - point2[0]) h = abs(point1[1] - point2[1]) p.drawRect(x1, y1, w, h) # draw SAM points if len(self.SAM_coordinates) != 0: for point in self.SAM_coordinates: color = "#FF0000" if point[2] == 0 else "#19EB25" pen = QtGui.QPen( QtGui.QColor(color), 5 * max(1, int(round(2.0 / Shape.scale))), QtCore.Qt.PenStyle.SolidLine, QtCore.Qt.PenCapStyle.RoundCap, ) p.setPen(pen) p.setOpacity(0.8) p.drawPoint(point[0], point[1]) if len(self.SAM_rects) != 0: box = self.SAM_rects[-1] pen = QtGui.QPen( QtGui.QColor("#2D7CFA"), 2 * max(1, 
int(round(2.0 / Shape.scale))), QtCore.Qt.PenStyle.SolidLine, ) p.setPen(pen) p.setOpacity(0.8) point1 = [box[0].x(), box[0].y()] point2 = [box[1].x(), box[1].y()] x1 = min(point1[0], point2[0]) y1 = min(point1[1], point2[1]) w = abs(point1[0] - point2[0]) h = abs(point1[1] - point2[1]) p.drawRect(x1, y1, w, h) if self.tracking_area != "": pen = QtGui.QPen( QtGui.QColor("#FF0000"), 2 * max(1, int(round(2.0 / Shape.scale))), QtCore.Qt.PenStyle.SolidLine, ) p.setPen(pen) p.setOpacity(0.1) p.setBrush(QtGui.QColor("#FF0000")); if len(self.tracking_area_polygon) > 0: corrected = self.corrected_pos_into_pixmap(self.prevMovePoint) point2 = [corrected.x(), corrected.y()] total = copy.deepcopy(self.tracking_area_polygon) if self.tracking_area == "drawing": total.append(point2) total = [ QtCore.QPoint(p[0], p[1]) for p in total] p.drawPolygon(total) p.setOpacity(0.7) if self.tracking_area == "drawing": p.drawPolyline(total) else: total.append(total[0]) p.drawPolyline(total) p.end() def transformPos(self, point): """Convert from widget-logical coordinates to painter-logical ones.""" return point / self.scale - QtCore.QPointF(self.offsetToCenter()) def offsetToCenter(self): s = self.scale area = super(Canvas, self).size() w, h = self.pixmap.width() * s, self.pixmap.height() * s aw, ah = area.width(), area.height() x = (aw - w) / (2 * s) if aw > w else 0 y = (ah - h) / (2 * s) if ah > h else 0 return QtCore.QPoint(x, y) def outOfPixmap(self, p): w, h = self.pixmap.width(), self.pixmap.height() return not (0 <= p.x() <= w - 1 and 0 <= p.y() <= h - 1) def finalise(self, SAM_SHAPE=False): if SAM_SHAPE: assert self.SAM_current self.SAM_current.close() self.storeShapes() self.SAM_current = None self.setHiding(False) self.newShape.emit() self.update() else: assert self.current self.current.close() self.shapes.append(self.current) self.storeShapes() self.current = None self.setHiding(False) self.newShape.emit() self.update() def closeEnough(self, p1, p2): # d = distance(p1 - p2) # m = (p1-p2).manhattanLength() # print "d %.2f, m %d, %.2f" % (d, m, d - m) # divide by scale to allow more precision when zoomed in return labelme.utils.distance(p1 - p2) < (self.epsilon / self.scale) def intersectionPoint(self, p1, p2): # Cycle through each image edge in clockwise fashion, # and find the one intersecting the current line segment. # http://paulbourke.net/geometry/lineline2d/ size = self.pixmap.size() points = [ (0, 0), (size.width() - 1, 0), (size.width() - 1, size.height() - 1), (0, size.height() - 1), ] # x1, y1 should be in the pixmap, x2, y2 should be out of the pixmap x1 = min(max(p1.x(), 0), size.width() - 1) y1 = min(max(p1.y(), 0), size.height() - 1) x2, y2 = p2.x(), p2.y() d, i, (x, y) = min(self.intersectingEdges((x1, y1), (x2, y2), points)) x3, y3 = points[i] x4, y4 = points[(i + 1) % 4] if (x, y) == (x1, y1): # Handle cases where previous point is on one of the edges. if x3 == x4: return QtCore.QPoint(x3, min(max(0, y2), max(y3, y4))) else: # y3 == y4 return QtCore.QPoint(min(max(0, x2), max(x3, x4)), y3) return QtCore.QPoint(x, y) def intersectingEdges(self, point1, point2, points): """Find intersecting edges. For each edge formed by `points', yield the intersection with the line segment `(x1,y1) - (x2,y2)`, if it exists. Also return the distance of `(x2,y2)' to the middle of the edge along with its index, so that the one closest can be chosen. 
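Following http://paulbourke.net/geometry/lineline2d/, the code below solves for the parameters ua = nua/denom (position along the segment) and ub = nub/denom (position along the edge); the two intersect only when both lie in [0, 1], and denom == 0 marks parallel or coincident lines.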
""" (x1, y1) = point1 (x2, y2) = point2 for i in range(4): x3, y3 = points[i] x4, y4 = points[(i + 1) % 4] denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1) nua = (x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3) nub = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3) if denom == 0: # This covers two cases: # nua == nub == 0: Coincident # otherwise: Parallel continue ua, ub = nua / denom, nub / denom if 0 <= ua <= 1 and 0 <= ub <= 1: x = x1 + ua * (x2 - x1) y = y1 + ua * (y2 - y1) m = QtCore.QPoint((x3 + x4) / 2, (y3 + y4) / 2) d = labelme.utils.distance(m - QtCore.QPoint(x2, y2)) yield d, i, (x, y) # These two, along with a call to adjustSize are required for the # scroll area. def sizeHint(self): return self.minimumSizeHint() def minimumSizeHint(self): if self.pixmap: return self.scale * self.pixmap.size() return super(Canvas, self).minimumSizeHint() def wheelEvent(self, ev): mods = ev.modifiers() delta = ev.angleDelta() if mods.value: # with Ctrl/Command key # zoom self.zoomRequest.emit(delta.y(), ev.position().toPoint()) else: # scroll self.scrollRequest.emit(delta.x(), QtCore.Qt.Orientation.Horizontal.value) self.scrollRequest.emit(delta.y(), QtCore.Qt.Orientation.Vertical.value) ev.accept() def keyPressEvent(self, ev): key = ev.key() if key == QtCore.Qt.Key.Key_Return: if self.SAM_mode != "": self.samFinish.emit() elif self.tracking_area: self.tracking_area = "drawn" self.update() elif self.canCloseShape(): self.finalise() def cancelManualDrawing(self): self.current = None self.drawingPolygon.emit(False) self.update() def setLastLabel(self, text, flags): assert text self.shapes[-1].label = text self.shapes[-1].flags = flags self.shapesBackups.pop() self.storeShapes() return self.shapes[-1] def undoLastLine(self): assert self.shapes self.current = self.shapes.pop() self.current.setOpen() if self.createMode in ["polygon"]: self.line.points = [self.current[-1], self.current[0]] self.drawingPolygon.emit(True) def undoLastPoint(self): if not self.current or self.current.isClosed(): return self.current.popPoint() if len(self.current) > 0: self.line[0] = self.current[-1] else: self.current = None self.drawingPolygon.emit(False) self.update() def loadPixmap(self, pixmap, clear_shapes=True): self.pixmap = pixmap if clear_shapes: self.shapes = [] self.update() def loadShapes(self, shapes, replace=True): if replace: self.shapes = list(shapes) else: self.shapes.extend(shapes) self.storeShapes() self.current = None self.hShape = None self.hVertex = None self.hEdge = None self.update() def setShapeVisible(self, shape, value): self.visible[shape] = value self.update() def overrideCursor(self, cursor): self.restoreCursor() self._cursor = cursor QtWidgets.QApplication.setOverrideCursor(cursor) def restoreCursor(self): QtWidgets.QApplication.restoreOverrideCursor() def resetState(self): self.restoreCursor() self.pixmap = None self.shapesBackups = [] self.update() ================================================ FILE: DLTA_AI_app/labelme/widgets/check_updates_UI.py ================================================ from labelme.widgets.links import open_release from bs4 import BeautifulSoup import requests from PyQt6.QtWidgets import QMessageBox, QLabel from PyQt6.QtCore import Qt from PyQt6.QtGui import QFont from PyQt6 import QtWidgets import time def PopUp(): """ Check for updates of DLTA-AI and display a message box with the result. The function checks the latest release of DLTA-AI on GitHub and compares it with the current version. 
If the latest release is newer than the current version, a message box is displayed with a button to download the latest version. Otherwise, a message box is displayed indicating that the user is using the latest version. Args: None Returns: None """ # Import the current version of DLTA-AI from labelme import __version__ # Initialize variables updates = False tag = {} tag["href"] = None try: # Get the HTML content of the releases page on GitHub url = "https://github.com/0ssamaak0/DLTA-AI/releases" html = requests.get(url, timeout=5).text soup = BeautifulSoup(html, "html.parser") # Find the first tag with class="Link--primary" tag = soup.find("a", class_="Link--primary") # Split the tag text on the first "v" to get the latest version number latest_version = tag.text.lower().split("v")[1] # Compare the latest version with the current version if latest_version != __version__: text = f"A new version of DLTA-AI (v{latest_version}) is available.\n You are currently using (v{__version__})\n" updates = True else: text = f"You are using the latest version of DLTA-AI (v{__version__})\n" except: text = f"You are using DLTA-AI (v{__version__})\n There was an error checking for updates.\n" # Create a message box with the result msgBox = QMessageBox() msgBox.setWindowTitle("Check for Updates") msgBox.setFont(QFont("Arial", 10)) # Set the font size to 10 # Add the text label to the message box msgBox.setText(text) # If there are updates, add a button to download the latest version if updates: msgBox.addButton(QMessageBox.StandardButton.Yes) msgBox.button(QMessageBox.StandardButton.Yes).setText("Get the Latest Version") msgBox.button(QMessageBox.StandardButton.Yes).clicked.connect(lambda: open_release(tag["href"])) # Add a close button to the message box msgBox.addButton(QMessageBox.StandardButton.Close) msgBox.button(QMessageBox.StandardButton.Close).setText("Close") # Display the message box msgBox.exec() ================================================ FILE: DLTA_AI_app/labelme/widgets/color_dialog.py ================================================ from PyQt6 import QtWidgets class ColorDialog(QtWidgets.QColorDialog): def __init__(self, parent=None): super(ColorDialog, self).__init__(parent) self.setOption(QtWidgets.QColorDialog.ColorDialogOption.ShowAlphaChannel) # The Mac native dialog does not support our restore button. self.setOption(QtWidgets.QColorDialog.ColorDialogOption.DontUseNativeDialog) # Add a restore defaults button. # The default is set at invocation time, so that it # works across dialogs for different elements. self.default = None self.bb = self.findChild(QtWidgets.QDialogButtonBox) self.bb.addButton(QtWidgets.QDialogButtonBox.StandardButton.RestoreDefaults) self.bb.clicked.connect(self.checkRestore) def getColor(self, value=None, title=None, default=None): self.default = default if title: self.setWindowTitle(title) if value: self.setCurrentColor(value) return self.currentColor() if self.exec() else None def checkRestore(self, button): if ( self.bb.buttonRole(button) & QtWidgets.QDialogButtonBox.ButtonRole.ResetRole and self.default ): self.setCurrentColor(self.default) ================================================ FILE: DLTA_AI_app/labelme/widgets/deleteSelectedShape_UI.py ================================================ from PyQt6 import QtCore from PyQt6.QtCore import Qt from PyQt6 import QtWidgets def PopUp(TOTAL_VIDEO_FRAMES, INDEX_OF_CURRENT_FRAME, config): """ Summary: Show a dialog to choose the deletion options.
( This Frame and All Previous Frames, This Frame and All Next Frames, All Frames, This Frame Only, Specific Range of Frames ) Args: TOTAL_VIDEO_FRAMES: the total number of frames config: a dictionary of configurations Returns: result: the result of the dialog config: the updated dictionary of configurations fromFrameVAL: the start frame of the deletion range toFrameVAL: the end frame of the deletion range """ dialog = QtWidgets.QDialog() dialog.setWindowTitle("Choose Deletion Options") dialog.setWindowModality(Qt.WindowModality.ApplicationModal) dialog.resize(500, 100) dialog.setWindowFlags(dialog.windowFlags() & ~QtCore.Qt.WindowType.WindowContextHelpButtonHint) layout = QtWidgets.QVBoxLayout() label = QtWidgets.QLabel("Choose Deletion Options") layout.addWidget(label) prev = QtWidgets.QRadioButton("This Frame and All Previous Frames") next = QtWidgets.QRadioButton("This Frame and All Next Frames") all = QtWidgets.QRadioButton( "All Frames") only = QtWidgets.QRadioButton("This Frame Only") from_to = QtWidgets.QRadioButton( "Specific Range of Frames") from_frame = QtWidgets.QSpinBox() to_frame = QtWidgets.QSpinBox() from_frame.setRange(1, TOTAL_VIDEO_FRAMES) to_frame.setRange(1, TOTAL_VIDEO_FRAMES) from_frame.valueChanged.connect(lambda: from_to.toggle()) to_frame.valueChanged.connect(lambda: from_to.toggle()) from_label = QtWidgets.QLabel("From:") to_label = QtWidgets.QLabel("To:") if config['deleteDefault'] == 'This Frame and All Previous Frames': prev.toggle() if config['deleteDefault'] == 'This Frame and All Next Frames': next.toggle() if config['deleteDefault'] == 'All Frames': all.toggle() if config['deleteDefault'] == 'This Frame Only': only.toggle() if config['deleteDefault'] == 'Specific Range of Frames': from_to.toggle() prev.toggled.connect(lambda: config.update( {'deleteDefault': 'This Frame and All Previous Frames'})) next.toggled.connect(lambda: config.update( {'deleteDefault': 'This Frame and All Next Frames'})) all.toggled.connect(lambda: config.update( {'deleteDefault': 'All Frames'})) only.toggled.connect(lambda: config.update( {'deleteDefault': 'This Frame Only'})) from_to.toggled.connect(lambda: config.update( {'deleteDefault': 'Specific Range of Frames'})) button_layout = QtWidgets.QHBoxLayout() button_layout.addWidget(only) button_layout.addWidget(all) layout.addLayout(button_layout) button_layout = QtWidgets.QHBoxLayout() button_layout.addWidget(prev) button_layout.addWidget(next) layout.addLayout(button_layout) layout.addWidget(from_to) button_layout = QtWidgets.QHBoxLayout() button_layout.addWidget(from_label) button_layout.addWidget(from_frame) button_layout.addWidget(to_label) button_layout.addWidget(to_frame) layout.addLayout(button_layout) buttonBox = QtWidgets.QDialogButtonBox( QtWidgets.QDialogButtonBox.StandardButton.Ok) buttonBox.accepted.connect(dialog.accept) buttonBox.rejected.connect(dialog.reject) layout.addWidget(buttonBox) dialog.setLayout(layout) result = dialog.exec() mode = config['deleteDefault'] fromFrameVAL = from_frame.value() toFrameVAL = to_frame.value() if mode == 'This Frame and All Previous Frames': toFrameVAL = INDEX_OF_CURRENT_FRAME fromFrameVAL = 1 elif mode == 'This Frame and All Next Frames': toFrameVAL = TOTAL_VIDEO_FRAMES fromFrameVAL = INDEX_OF_CURRENT_FRAME elif mode == 'This Frame Only': toFrameVAL = INDEX_OF_CURRENT_FRAME fromFrameVAL = INDEX_OF_CURRENT_FRAME elif mode == 'All Frames': toFrameVAL = TOTAL_VIDEO_FRAMES fromFrameVAL = 1 return result, config, fromFrameVAL, toFrameVAL 
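# A minimal usage sketch (hypothetical values; assumes a config dict that already
# carries a 'deleteDefault' entry, as the function expects):
# if __name__ == "__main__":
#     import sys
#     app = QtWidgets.QApplication(sys.argv)
#     cfg = {"deleteDefault": "This Frame Only"}
#     result, cfg, from_frame, to_frame = PopUp(
#         TOTAL_VIDEO_FRAMES=100, INDEX_OF_CURRENT_FRAME=42, config=cfg)
#     print(result, from_frame, to_frame)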
================================================ FILE: DLTA_AI_app/labelme/widgets/editLabel_videoMode.py ================================================ from PyQt6.QtCore import Qt from PyQt6 import QtWidgets from labelme.widgets.MsgBox import OKmsgBox from labelme.utils.helpers.mathOps import coco_classes import copy def editLabel_idChanged_UI(config, old_group_id, new_group_id, id_frames_rec, INDEX_OF_CURRENT_FRAME): idChanged = old_group_id != new_group_id if not idChanged: result = QtWidgets.QDialog.DialogCode.Accepted only_this_frame = False duplicates = False return result, config, only_this_frame, duplicates dialog = QtWidgets.QDialog() dialog.setWindowTitle("Choose Edit Options") dialog.setWindowModality(Qt.WindowModality.ApplicationModal) dialog.resize(250, 100) layout = QtWidgets.QVBoxLayout() label = QtWidgets.QLabel("Choose Edit Options") layout.addWidget(label) only = QtWidgets.QRadioButton("Edit only this frame") all = QtWidgets.QRadioButton("Edit all frames with this ID") if config['EditDefault'] == 'Edit only this frame': only.toggle() if config['EditDefault'] == 'Edit all frames with this ID': all.toggle() only.toggled.connect(lambda: config.update( {'EditDefault': 'Edit only this frame'})) all.toggled.connect(lambda: config.update( {'EditDefault': 'Edit all frames with this ID'})) layout.addWidget(only) layout.addWidget(all) buttonBox = QtWidgets.QDialogButtonBox( QtWidgets.QDialogButtonBox.StandardButton.Ok) buttonBox.accepted.connect(dialog.accept) layout.addWidget(buttonBox) dialog.setLayout(layout) result = dialog.exec() only_this_frame = config['EditDefault'] == 'Edit only this frame' duplicates = check_duplicates_editLabel(id_frames_rec, old_group_id, new_group_id, only_this_frame, idChanged, INDEX_OF_CURRENT_FRAME) return result, config, only_this_frame, duplicates def check_duplicates_editLabel(id_frames_rec, old_group_id, new_group_id, only_this_frame, idChanged, currFrame): """ Summary: Check if there are id duplicates in any frame if the id is changed. 
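For example, changing ID 3 to ID 5 across all frames is rejected when some frame already contains both IDs, since the edit would leave two shapes with ID 5 in that frame.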
    Args:
        id_frames_rec: a dictionary of id frames records
        old_group_id: the old id
        new_group_id: the new id
        only_this_frame: a flag to indicate if the id is changed only in the current frame or in all frames
        idChanged: a flag to indicate if the id is changed or not (if False, the function returns False as there is no change)
        currFrame: the current frame index

    Returns:
        True if there will be duplicates, False otherwise
    """
    if not idChanged:
        return False
    # frame record of the old id
    old_id_frame_record = copy.deepcopy(id_frames_rec['id_' + str(old_group_id)])
    # frame record of the new id (the new id may not have a record yet)
    try:
        new_id_frame_record = copy.deepcopy(id_frames_rec['id_' + str(new_group_id)])
    except KeyError:
        new_id_frame_record = set()
    # if the change is only in the current frame
    if only_this_frame:
        # check if the new id already exists in the current frame
        Intersection = new_id_frame_record.intersection({currFrame})
        if len(Intersection) != 0:
            OKmsgBox("Warning",
                     f"Two shapes with the same ID exist.\nApparently, a shape with ID ({new_group_id}) already exists alongside the shape with ID ({old_group_id}) in the CURRENT FRAME, so the edit would result in two shapes with the same ID in the same frame.\n\nThe edit is NOT performed.")
            return True
    # if the change is in all frames
    else:
        # check if the new id exists in any frame in which the old id exists
        Intersection = old_id_frame_record.intersection(new_id_frame_record)
        if len(Intersection) != 0:
            reduced_Intersection = reducing_Intersection(Intersection)
            OKmsgBox("ID already exists",
                     f'Two shapes with the same ID exist in at least one frame.\nApparently, a shape with ID ({new_group_id}) already exists alongside the shape with ID ({old_group_id}), e.g. in frames ({reduced_Intersection}), so the edit would result in two shapes with the same ID ({new_group_id}).\n\nThe edit is NOT performed.')
            return True
    return False


def editLabel_handle_data(currFrame, listObj, trajectories, id_frames_rec, idChanged, only_this_frame, shape, old_group_id, new_group_id=None):
    """
    Summary:
        Handle id change in edit label.
        Check if the id is changed or not.
        If the id is changed, transfer the frames from the old id to the new id. Two cases:
            1- only_this_frame: transfer only the current frame
            2- not only_this_frame: transfer all the frames
        If the id is not changed, update the id in the current frame.
Args: currFrame: the current frame index listObj: a list of objects (each object is a dictionary of a frame with keys (frame_idx, frame_data)) trajectories: a dictionary of trajectories id_frames_rec: a dictionary of id frames records idChanged: a flag to indicate if the id is changed or not only_this_frame: a flag to indicate if the id is changed only in the current frame or in all frames shape: the shape to update old_group_id: the old id new_group_id: the new id, if None then the old id is used (no id change) Returns: id_frames_rec: a dictionary of id frames records trajectories: a dictionary of trajectories listObj: a list of objects (each object is a dictionary of a frame with keys (frame_idx, frame_data)) """ if new_group_id is None or not idChanged: new_group_id = old_group_id if not idChanged: old_frames = id_frames_rec['id_' + str(old_group_id)] listObj = update_id_in_listObjframes(listObj, old_frames, shape, old_group_id) elif idChanged and only_this_frame: transfer_rec_and_traj(old_group_id, id_frames_rec, trajectories, [currFrame], new_group_id) update_id_in_listObjframe(listObj, currFrame, shape, old_group_id, new_group_id) new_frames = id_frames_rec['id_' + str(new_group_id)] update_id_in_listObjframes(listObj, new_frames, shape, new_group_id) elif idChanged and not only_this_frame: old_frames = id_frames_rec['id_' + str(old_group_id)] transfer_rec_and_traj(old_group_id, id_frames_rec, trajectories, old_frames, new_group_id) update_id_in_listObjframes(listObj, old_frames, shape, old_group_id, new_group_id) new_frames = id_frames_rec['id_' + str(new_group_id)] update_id_in_listObjframes(listObj, new_frames, shape, new_group_id) return id_frames_rec, trajectories, listObj def update_id_in_listObjframe(listObj, frame, shape, old_id, new_id = None): """ Summary: Update the id of a shape in a frame in listObj. Args: listObj: a list of objects (each object is a dictionary of a frame with keys (frame_idx, frame_data)) frame: the frame to update shape: the shape to update old_id: the old id new_id: the new id, if None then the old id is used (no id change) Returns: listObj: a list of objects (each object is a dictionary of a frame with keys (frame_idx, frame_data)) """ new_id = old_id if new_id is None else new_id for object_ in listObj[frame - 1]['frame_data']: if object_['tracker_id'] == old_id: object_['tracker_id'] = new_id object_['class_name'] = shape.label object_['confidence'] = str(1.0) object_['class_id'] = coco_classes.index( shape.label) if shape.label in coco_classes else -1 break return listObj def update_id_in_listObjframes(listObj, frames, shape, old_id, new_id = None): """ Summary: Update the id of a shape in a list of frames in listObj. Args: listObj: a list of objects (each object is a dictionary of a frame with keys (frame_idx, frame_data)) frames: a list of frames to update shape: the shape to update old_id: the old id new_id: the new id, if None then the old id is used (no id change) Returns: listObj: a list of objects (each object is a dictionary of a frame with keys (frame_idx, frame_data)) """ for frame in frames: listObj = update_id_in_listObjframe(listObj, frame, shape, old_id, new_id) return listObj def transfer_rec_and_traj(id, id_frames_rec, trajectories, frames, new_id): """ Summary: Transfer frames from an id to another id. 
Args: id: the id to transfer from id_frames_rec: a dictionary of id frames records trajectories: a dictionary of trajectories frames: a list of frames to transfer new_id: the id to transfer to Returns: id_frames_rec: a dictionary of id frames records trajectories: a dictionary of trajectories """ # old id frame record and trajectory id_rec = id_frames_rec['id_' + str(id)] id_traj = trajectories['id_' + str(id)] # new id frame record and trajectory try: new_id_rec = id_frames_rec['id_' + str(new_id)] new_id_traj = trajectories['id_' + str(new_id)] except: new_id_rec = set() new_id_traj = [(-1, -1)] * len(id_traj) # transfer frames id_rec = id_rec - set(frames) new_id_rec = new_id_rec.union(set(frames)) # transfer trajectories for frame in frames: new_id_traj[frame - 1] = id_traj[frame - 1] id_traj[frame - 1] = (-1, -1) id_frames_rec['id_' + str(id)] = id_rec id_frames_rec['id_' + str(new_id)] = new_id_rec trajectories['id_' + str(id)] = id_traj trajectories['id_' + str(new_id)] = new_id_traj return id_frames_rec, trajectories def reducing_Intersection(Intersection): """ Summary: Reduce the intersection of two sets to a string. Make all the consecutive numbers in the intersection as a range. example: [1, 2, 3, 4, 5, 7, 8, 9] -> "1 to 5, 7 to 9" Args: Intersection: the intersection of two sets Returns: reduced_Intersection: the reduced intersection as a string """ Intersection = list(Intersection) Intersection.sort() reduced_Intersection = "" reduced_Intersection += str(Intersection[0]) flag = False i = 1 while(i < len(Intersection)): if Intersection[i] - Intersection[i - 1] == 1: reduced_Intersection += " to " if not flag else "" flag = True if i + 1 == len(Intersection): reduced_Intersection += str(Intersection[i]) else: if flag: reduced_Intersection += str(Intersection[i - 1]) if i + 1 < len(Intersection): reduced_Intersection += ", " + str(Intersection[i]) i += 1 flag = False else: reduced_Intersection += ", " + str(Intersection[i]) i += 1 return reduced_Intersection ================================================ FILE: DLTA_AI_app/labelme/widgets/escapable_qlist_widget.py ================================================ from PyQt6.QtCore import Qt from PyQt6 import QtWidgets class EscapableQListWidget(QtWidgets.QListWidget): def keyPressEvent(self, event): super(EscapableQListWidget, self).keyPressEvent(event) if event.key() == Qt.Key.Key_Escape: self.clearSelection() ================================================ FILE: DLTA_AI_app/labelme/widgets/exportData_UI.py ================================================ from PyQt6.QtCore import Qt from PyQt6 import QtGui from PyQt6 import QtWidgets from labelme.widgets import open_file try: from labelme.utils.custom_exports import custom_exports_list except: custom_exports_list = [] print("custom_exports file not found") def PopUp(mode = "video"): """ Displays a dialog box for choosing export options for annotations and videos. Args: mode (str): The mode of the export. Can be either "video" or "image". Defaults to "video". Returns: A tuple containing the result of the dialog box and the selected export options. If the dialog box is accepted, the first element of the tuple is `QtWidgets.QDialog.DialogCode.Accepted`. Otherwise, it is `QtWidgets.QDialog.Rejected`. The second element of the tuple is a boolean indicating whether to export annotations in COCO format. 
        If `mode` is "video", the third element of the tuple is a boolean indicating whether to
        export annotations in MOT format, and the fourth element is a boolean indicating whether
        to export the video with the current visualization settings.
        If there are any custom export options available, the last element of the tuple is a
        list of booleans indicating whether to export using each custom export option.
    """
    dialog = QtWidgets.QDialog()
    dialog.setWindowTitle("Choose Export Options")
    dialog.setWindowModality(Qt.WindowModality.ApplicationModal)
    dialog.resize(250, 100)
    layout = QtWidgets.QVBoxLayout()
    font = QtGui.QFont()
    font.setBold(True)
    font.setPointSize(10)
    if mode == "video":
        vid_label = QtWidgets.QLabel("Export Video")
        vid_label.setFont(font)
        vid_label.setMargin(7)
    std_label = QtWidgets.QLabel("Export Annotations (Standard Formats)")
    std_label.setFont(font)
    std_label.setMargin(7)
    custom_label = QtWidgets.QLabel("Export Annotations (Custom Formats)")
    custom_label.setFont(font)
    custom_label.setMargin(7)

    # Create a button group to hold the radio buttons
    button_group = QtWidgets.QButtonGroup()

    # Create the radio buttons and add them to the button group
    coco_radio = QtWidgets.QRadioButton("COCO Format (Detection / Segmentation)")

    # make the video and mot radio buttons if the mode is video
    if mode == "video":
        video_radio = QtWidgets.QRadioButton("Export Video with current visualization settings")
        mot_radio = QtWidgets.QRadioButton("MOT Format (Tracking)")

    # make the custom exports radio buttons and add them to the button group
    custom_exports_radio_list = []
    for custom_exp in custom_exports_list:
        # only offer the custom exports that match the current mode ("video" or "image")
        if custom_exp.mode == mode:
            custom_radio = QtWidgets.QRadioButton(custom_exp.button_name)
            button_group.addButton(custom_radio)
            custom_exports_radio_list.append(custom_radio)

    button_group.addButton(coco_radio)

    # add the video and mot radio buttons to the button group if the mode is video
    if mode == "video":
        button_group.addButton(video_radio)
        button_group.addButton(mot_radio)

    # Add to the layout
    # video label and radio buttons
    if mode == "video":
        layout.addWidget(vid_label)
        layout.addWidget(video_radio)
    # standard label and radio buttons
    layout.addWidget(std_label)
    layout.addWidget(coco_radio)
    if mode == "video":
        layout.addWidget(mot_radio)
    # custom label and radio buttons
    layout.addWidget(custom_label)
    if len(custom_exports_radio_list) != 0:
        for custom_radio in custom_exports_radio_list:
            layout.addWidget(custom_radio)
    else:
        layout.addWidget(QtWidgets.QLabel("No Custom Exports Available, you can add them in utils.custom_exports.py"))

    # create a button that opens the custom_exports.py file when clicked
    custom_exports_button = QtWidgets.QPushButton("Open Custom Exports")
    custom_exports_button.clicked.connect(open_file.PopUp)
    layout.addWidget(custom_exports_button)

    buttonBox = QtWidgets.QDialogButtonBox(
        QtWidgets.QDialogButtonBox.StandardButton.Ok | QtWidgets.QDialogButtonBox.StandardButton.Cancel)
    buttonBox.accepted.connect(dialog.accept)
    buttonBox.rejected.connect(dialog.reject)
    layout.addWidget(buttonBox)

    dialog.setLayout(layout)
    result = dialog.exec()

    # prepare the checked list of custom exports
    custom_exports_radio_checked_list = []
    for custom_radio in custom_exports_radio_list:
        custom_exports_radio_checked_list.append(custom_radio.isChecked())

    if mode == "video":
        return result, coco_radio.isChecked(), mot_radio.isChecked(), video_radio.isChecked(), custom_exports_radio_checked_list
    else:
        return result, coco_radio.isChecked(), custom_exports_radio_checked_list


================================================
FILE: DLTA_AI_app/labelme/widgets/feedback_UI.py
================================================
from labelme.widgets.links import open_issue
from PyQt6.QtWidgets import QMessageBox


def PopUp():
    """
    Displays a dialog box for providing feedback on the DLTA-AI project.

    Parameters:
        None

    Returns:
        None
    """
    # Define the text for the feedback dialog box
    text = "Found a bug? 🐞\nWant to suggest a feature? 🌟\n"

    # Create the feedback dialog box
    msgBox = QMessageBox()
    msgBox.setWindowTitle("Feedback")
    msgBox.setText(text)

    # Add a button to open the GitHub issues page
    msgBox.addButton(QMessageBox.StandardButton.Yes)
    msgBox.button(QMessageBox.StandardButton.Yes).setText("Open an Issue")
    msgBox.button(QMessageBox.StandardButton.Yes).clicked.connect(open_issue)

    # Add a close button
    msgBox.addButton(QMessageBox.StandardButton.Close)
    msgBox.button(QMessageBox.StandardButton.Close).setText("Close")

    # Display the feedback dialog box
    msgBox.exec()


================================================
FILE: DLTA_AI_app/labelme/widgets/getIDfromUser_UI.py
================================================
from PyQt6.QtCore import Qt
from PyQt6 import QtWidgets
from .MsgBox import OKmsgBox
from labelme.utils.helpers.mathOps import is_id_repeated


def PopUp(self, group_id, text):
    """
    Summary:
        Show a dialog to get a new id from the user.
        Check if the id is repeated.

    Args:
        self: the main window object to access the canvas
        group_id: the group id
        text: Class name

    Returns:
        group_id: the new group id
        text: Class name (False if the user-input id is repeated)
    """
    mainTEXT = "A Shape with that ID already exists in this frame.\n\n"
    repeated = 0
    while is_id_repeated(self, group_id):
        dialog = QtWidgets.QDialog()
        dialog.setWindowTitle("ID already exists")
        dialog.setWindowModality(Qt.WindowModality.ApplicationModal)
        dialog.resize(450, 100)
        if repeated == 0:
            label = QtWidgets.QLabel(mainTEXT + 'Please try a new ID: ')
        if repeated == 1:
            label = QtWidgets.QLabel(mainTEXT + 'OH GOD.. AGAIN? I hope you are not doing this on purpose..')
        if repeated == 2:
            label = QtWidgets.QLabel(mainTEXT + 'AGAIN? REALLY? LAST time for you..')
        if repeated == 3:
            text = False
            return group_id, text
        properID = QtWidgets.QSpinBox()
        properID.setRange(1, 1000)
        buttonBox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.StandardButton.Ok)
        buttonBox.accepted.connect(dialog.accept)
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(label)
        layout.addWidget(properID)
        layout.addWidget(buttonBox)
        dialog.setLayout(layout)
        result = dialog.exec()
        if result != QtWidgets.QDialog.DialogCode.Accepted:
            text = False
            return group_id, text
        group_id = properID.value()
        repeated += 1
    if repeated > 1:
        OKmsgBox("Finally..!", "OH, Finally..!")
    return group_id, text


================================================
FILE: DLTA_AI_app/labelme/widgets/interpolation_UI.py
================================================
from PyQt6 import QtCore
from PyQt6.QtCore import Qt
from PyQt6 import QtGui
from PyQt6 import QtWidgets


def PopUp(config):
    """
    Summary:
        Show a dialog to choose the interpolation options.
( interpolate only missed frames between detected frames, interpolate all frames between your KEY frames, interpolate ALL frames with SAM (more precision, more time) ) Args: config: a dictionary of configurations Returns: result: the result of the dialog config: the updated dictionary of configurations """ def show_unshow_overwrite(): if with_sam.isChecked(): config.update({'interpolationDefMethod': 'SAM'}) overwrite_checkBox.setEnabled(True) else: config.update({'interpolationDefMethod': 'Linear'}) overwrite_checkBox.setEnabled(False) dialog = QtWidgets.QDialog() dialog.setWindowTitle("Choose Interpolation Options") dialog.setWindowModality(Qt.WindowModality.ApplicationModal) dialog.resize(250, 100) dialog.setWindowFlags(dialog.windowFlags() & ~QtCore.Qt.WindowType.WindowContextHelpButtonHint) layout = QtWidgets.QVBoxLayout() label = QtWidgets.QLabel("Choose Interpolation Options") label.setFont(QtGui.QFont("Arial", 10)) method_label = QtWidgets.QLabel("Interpolation Method") between_label = QtWidgets.QLabel("Interpolation Between") layout.addWidget(label) # Interpolation Method button group method_group = QtWidgets.QButtonGroup() with_linear = QtWidgets.QRadioButton("Linear Interpolation") with_sam = QtWidgets.QRadioButton("SAM Interpolation") method_group.addButton(with_linear) method_group.addButton(with_sam) with_linear.toggled.connect(show_unshow_overwrite) with_sam.toggled.connect(show_unshow_overwrite) layout.addWidget(method_label) method_layout = QtWidgets.QHBoxLayout() method_layout.addWidget(with_linear) method_layout.addWidget(with_sam) layout.addLayout(method_layout) # Keyframes button group between_group = QtWidgets.QButtonGroup() with_keyframes = QtWidgets.QRadioButton("Selected Keyframes") without_keyframes = QtWidgets.QRadioButton("Detected Frames") between_group.addButton(with_keyframes) between_group.addButton(without_keyframes) with_keyframes.toggled.connect(lambda: config.update({'interpolationDefType': 'key' * with_keyframes.isChecked()})) without_keyframes.toggled.connect(lambda: config.update({'interpolationDefType': 'all' * without_keyframes.isChecked()})) layout.addWidget(between_label) keyframes_layout = QtWidgets.QHBoxLayout() keyframes_layout.addWidget(with_keyframes) keyframes_layout.addWidget(without_keyframes) layout.addLayout(keyframes_layout) overwrite_checkBox = QtWidgets.QCheckBox("Overwrite used frames with SAM") overwrite_checkBox.setChecked(config['interpolationOverwrite']) overwrite_checkBox.toggled.connect(lambda: config.update({'interpolationOverwrite': overwrite_checkBox.isChecked()})) layout.addWidget(overwrite_checkBox) show_unshow_overwrite() # for some reason you must check linear then sam to make it work with_linear.setChecked(True) buttonBox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.StandardButton.Ok) buttonBox.accepted.connect(dialog.accept) layout.addWidget(buttonBox) dialog.setLayout(layout) result = dialog.exec() return result, config ================================================ FILE: DLTA_AI_app/labelme/widgets/label_dialog.py ================================================ import re from qtpy import QT_VERSION from PyQt6 import QtCore from PyQt6 import QtGui from PyQt6 import QtWidgets from labelme.logger import logger import labelme.utils QT5 = QT_VERSION[0] == "5" # TODO(unknown): # - Calculate optimal position so as not to go out of screen area. 
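# (Editor's sketch) A minimal illustration for the TODO above, assuming the
# dialog is positioned with self.move(QtGui.QCursor.pos()) as popUp() below
# does; it clamps the target point so the dialog stays inside the available
# screen area. The helper is hypothetical and not part of this module, hence
# it is left commented out:
#
#     def _clamp_to_screen(widget, pos):
#         screen = QtGui.QGuiApplication.screenAt(pos) or QtGui.QGuiApplication.primaryScreen()
#         area = screen.availableGeometry()
#         x = min(max(pos.x(), area.left()), area.right() - widget.width())
#         y = min(max(pos.y(), area.top()), area.bottom() - widget.height())
#         return QtCore.QPoint(x, y)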
class LabelQLineEdit(QtWidgets.QLineEdit): def setListWidget(self, list_widget): self.list_widget = list_widget def keyPressEvent(self, e): if e.key() in [QtCore.Qt.Key.Key_Up, QtCore.Qt.Key.Key_Down]: self.list_widget.keyPressEvent(e) else: super(LabelQLineEdit, self).keyPressEvent(e) class LabelDialog(QtWidgets.QDialog): def __init__( self, text="Enter object label", parent=None, labels=None, sort_labels=True, show_text_field=True, completion="startswith", fit_to_content=None, flags=None, ): if fit_to_content is None: fit_to_content = {"row": False, "column": True} self._fit_to_content = fit_to_content super(LabelDialog, self).__init__(parent) self.setWindowTitle("Edit Label") self.edit = LabelQLineEdit() self.edit.setPlaceholderText(text) self.edit.setValidator(labelme.utils.labelValidator()) self.edit.editingFinished.connect(self.postProcess) if flags: self.edit.textChanged.connect(self.updateFlags) self.edit_group_id = QtWidgets.QLineEdit() self.edit_group_id.setPlaceholderText("Tracking ID") self.edit_group_id.setValidator( QtGui.QRegularExpressionValidator(QtCore.QRegularExpression(r"\d*"), None) ) self.edit_group_id_label = QtWidgets.QLabel() self.edit_group_id_label.setText("Tracking ID") self.select_class_label = QtWidgets.QLabel() self.select_class_label.setText("Class Name") # buttons self.buttonBox = bb = QtWidgets.QDialogButtonBox( QtWidgets.QDialogButtonBox.StandardButton.Ok | QtWidgets.QDialogButtonBox.StandardButton.Cancel, QtCore.Qt.Orientation.Horizontal, self ) bb.button(bb.StandardButton.Ok).setIcon(labelme.utils.newIcon("done")) bb.button(bb.StandardButton.Cancel).setIcon(labelme.utils.newIcon("undo")) bb.setCenterButtons(True) # center the buttons bb.accepted.connect(self.validate) bb.rejected.connect(self.reject) # label_list self.labelList = QtWidgets.QListWidget() if self._fit_to_content["row"]: self.labelList.setHorizontalScrollBarPolicy( QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOff ) if self._fit_to_content["column"]: self.labelList.setVerticalScrollBarPolicy( QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOff ) self._sort_labels = sort_labels if labels: self.labelList.addItems(labels) if self._sort_labels: self.labelList.sortItems() else: self.labelList.setDragDropMode( QtWidgets.QAbstractItemView.InternalMove ) self.labelList.currentItemChanged.connect(self.labelSelected) self.labelList.itemDoubleClicked.connect(self.labelDoubleClicked) self.edit.setListWidget(self.labelList) self.labelListLabel = QtWidgets.QLabel() self.labelListLabel.setText("Select From Class List") # label_flags if flags is None: flags = {} self._flags = flags self.flagsLayout = QtWidgets.QVBoxLayout() self.resetFlags() self.edit.textChanged.connect(self.updateFlags) # confidence self.confidenceEdit = QtWidgets.QLineEdit() self.confidenceEdit.setPlaceholderText('Confidence') # Add a validator to accept only floats between 0 and 1 validator = QtGui.QDoubleValidator(0, 1, 2, self.confidenceEdit) self.confidenceEdit.setValidator(validator) # add title before confidence self.confidenceEditLabel = QtWidgets.QLabel() self.confidenceEditLabel.setText('Confidence') layout = QtWidgets.QVBoxLayout() layout.addItem(self.flagsLayout) layout.addWidget(self.select_class_label) layout.addWidget(self.edit) # Create a vertical layout for the edit group ID label and edit edit_group_id_layout = QtWidgets.QVBoxLayout() edit_group_id_layout.addWidget(self.edit_group_id_label) edit_group_id_layout.addWidget(self.edit_group_id) # Create a vertical layout for the confidence label and edit confidence_layout = 
QtWidgets.QVBoxLayout() confidence_layout.addWidget(self.confidenceEditLabel) confidence_layout.addWidget(self.confidenceEdit) # add both vertical layouts to a horizontal layout horizontal_layout = QtWidgets.QHBoxLayout() horizontal_layout.addItem(edit_group_id_layout) horizontal_layout.addSpacing(10) # add 10 pixels of space horizontal_layout.addItem(confidence_layout) # add the horizontal layout to the main layout layout.addItem(horizontal_layout) # add the label list and label list label to the main layout layout.addWidget(self.labelListLabel) layout.addWidget(self.labelList) layout.addWidget(bb) self.resize(300,200) self.setLayout(layout) # completion completer = QtWidgets.QCompleter() if not QT5 and completion != "startswith": logger.warn( "completion other than 'startswith' is only " "supported with Qt5. Using 'startswith'" ) completion = "startswith" if completion == "startswith": completer.setCompletionMode(QtWidgets.QCompleter.CompletionMode.InlineCompletion) # Default settings. # completer.setFilterMode(QtCore.Qt.MatchStartsWith) elif completion == "contains": completer.setCompletionMode(QtWidgets.QCompleter.CompletionMode.PopupCompletion) completer.setFilterMode(QtCore.Qt.MatchFlag.MatchContains) else: raise ValueError("Unsupported completion: {}".format(completion)) completer.setModel(self.labelList.model()) self.edit.setCompleter(completer) def addLabelHistory(self, label): if self.labelList.findItems(label, QtCore.Qt.MatchFlag.MatchExactly): return self.labelList.addItem(label) if self._sort_labels: self.labelList.sortItems() def labelSelected(self, item): self.edit.setText(item.text()) def validate(self): text = self.edit.text() if hasattr(text, "strip"): text = text.strip() else: text = text.trimmed() if text: self.accept() def labelDoubleClicked(self, item): self.validate() def postProcess(self): text = self.edit.text() if hasattr(text, "strip"): text = text.strip() else: text = text.trimmed() self.edit.setText(text) def updateFlags(self, label_new): # keep state of shared flags flags_old = self.getFlags() flags_new = {} for pattern, keys in self._flags.items(): if re.match(pattern, label_new): for key in keys: flags_new[key] = flags_old.get(key, False) self.setFlags(flags_new) def deleteFlags(self): for i in reversed(range(self.flagsLayout.count())): item = self.flagsLayout.itemAt(i).widget() self.flagsLayout.removeWidget(item) item.setParent(None) def resetFlags(self, label=""): flags = {} for pattern, keys in self._flags.items(): if re.match(pattern, label): for key in keys: flags[key] = False self.setFlags(flags) def setFlags(self, flags): self.deleteFlags() for key in flags: item = QtWidgets.QCheckBox(key, self) item.setChecked(flags[key]) self.flagsLayout.addWidget(item) item.show() def getFlags(self): flags = {} for i in range(self.flagsLayout.count()): item = self.flagsLayout.itemAt(i).widget() print(type(item)) flags[item.text()] = item.isChecked() return flags def getGroupId(self): group_id = self.edit_group_id.text() if group_id: return int(group_id) return None def getContent(self): content = self.confidenceEdit.text() if content: return content return None def setContent(self, content): if type(content) != str: content = str(content) self.confidenceEdit.setText(content) def popUp(self, text=None, move=True, flags=None, group_id=None, content=None, skip_flag=False): if self._fit_to_content["row"]: self.labelList.setMinimumHeight( self.labelList.sizeHintForRow(0) * self.labelList.count() + 2 ) if self._fit_to_content["column"]: self.labelList.setMinimumWidth( 
self.labelList.sizeHintForColumn(0) + 2 ) # if text is None, the previous label in self.edit is kept if text is None: text = self.edit.text() # if content is None, make the self.confidenceEdit empty if content is None: content="" self.setContent(content) if flags: self.setFlags(flags) else: self.resetFlags(text) self.edit.setText(text) self.edit.setSelection(0, len(text)) if group_id is None: self.edit_group_id.clear() else: self.edit_group_id.setText(str(group_id)) items = self.labelList.findItems(text, QtCore.Qt.MatchFlag.MatchFixedString) if items: if len(items) != 1: logger.warning("Label list has duplicate '{}'".format(text)) self.labelList.setCurrentItem(items[0]) row = self.labelList.row(items[0]) self.edit.completer().setCurrentRow(row) self.edit.setFocus(QtCore.Qt.FocusReason.PopupFocusReason) if move: self.move(QtGui.QCursor.pos()) if skip_flag: return self.edit.text(), self.getFlags(), self.getGroupId(), self.getContent() if self.exec(): return self.edit.text(), self.getFlags(), self.getGroupId(), self.getContent() else: return None, None, None, None ================================================ FILE: DLTA_AI_app/labelme/widgets/label_list_widget.py ================================================ from PyQt6 import QtCore from PyQt6.QtCore import Qt from PyQt6 import QtGui from PyQt6.QtGui import QPalette from PyQt6 import QtWidgets from PyQt6.QtWidgets import QStyle # https://stackoverflow.com/a/2039745/4158863 class HTMLDelegate(QtWidgets.QStyledItemDelegate): def __init__(self, parent=None): super(HTMLDelegate, self).__init__() self.doc = QtGui.QTextDocument(self) def paint(self, painter, option, index): painter.save() options = QtWidgets.QStyleOptionViewItem(option) self.initStyleOption(options, index) self.doc.setHtml(options.text) options.text = "" style = ( QtWidgets.QApplication.style() if options.widget is None else options.widget.style() ) style.drawControl(QStyle.ControlElement.CE_ItemViewItem, options, painter) ctx = QtGui.QAbstractTextDocumentLayout.PaintContext() if option.state & QStyle.StateFlag.State_Selected: ctx.palette.setColor( QPalette.ColorRole.Text, option.palette.color( QPalette.ColorGroup.Active, QPalette.ColorRole.HighlightedText ), ) else: ctx.palette.setColor( QPalette.ColorRole.Text, option.palette.color(QPalette.ColorGroup.Active, QPalette.ColorRole.Text), ) textRect = style.subElementRect(QStyle.SubElement.SE_ItemViewItemText, options) if index.column() != 0: textRect.adjust(5, 0, 0, 0) thefuckyourshitup_constant = 4 margin = (option.rect.height() - options.fontMetrics.height()) // 2 margin = margin - thefuckyourshitup_constant textRect.setTop(textRect.top() + margin) painter.translate(textRect.topLeft()) painter.setClipRect(textRect.translated(-textRect.topLeft())) self.doc.documentLayout().draw(painter, ctx) painter.restore() def sizeHint(self, option, index): thefuckyourshitup_constant = 4 return QtCore.QSize( self.doc.idealWidth(), self.doc.size().height() - thefuckyourshitup_constant, ) class LabelListWidgetItem(QtGui.QStandardItem): def __init__(self, text=None, shape=None): super(LabelListWidgetItem, self).__init__() self.setText(text) self.setShape(shape) self.setCheckable(True) self.setCheckState(Qt.CheckState.Checked) self.setEditable(False) self.setTextAlignment(Qt.AlignmentFlag.AlignBottom) font = QtGui.QFont("Arial", 10) self.setFont(font) def clone(self): return LabelListWidgetItem(self.text(), self.shape()) def setShape(self, shape): self.setData(shape, Qt.ItemDataRole.UserRole) def shape(self): return 
self.data(Qt.ItemDataRole.UserRole) def __hash__(self): return id(self) def __repr__(self): return '{}("{}")'.format(self.__class__.__name__, self.text()) class StandardItemModel(QtGui.QStandardItemModel): itemDropped = QtCore.pyqtSignal() def removeRows(self, *args, **kwargs): ret = super().removeRows(*args, **kwargs) self.itemDropped.emit() return ret class LabelListWidget(QtWidgets.QListView): itemDoubleClicked = QtCore.pyqtSignal(LabelListWidgetItem) itemSelectionChanged = QtCore.pyqtSignal(list, list) def __init__(self): super(LabelListWidget, self).__init__() self._selectedItems = [] self.setWindowFlags(Qt.WindowType.Window) self.setModel(StandardItemModel()) self.model().setItemPrototype(LabelListWidgetItem()) self.setItemDelegate(HTMLDelegate()) self.setSelectionMode(QtWidgets.QAbstractItemView.SelectionMode.ExtendedSelection) self.setDragDropMode(QtWidgets.QAbstractItemView.DragDropMode.InternalMove) self.setDefaultDropAction(QtCore.Qt.DropAction.MoveAction) self.doubleClicked.connect(self.itemDoubleClickedEvent) self.selectionModel().selectionChanged.connect( self.itemSelectionChangedEvent ) def __len__(self): return self.model().rowCount() def __getitem__(self, i): return self.model().item(i) def __iter__(self): for i in range(len(self)): yield self[i] @property def itemDropped(self): return self.model().itemDropped @property def itemChanged(self): return self.model().itemChanged def itemSelectionChangedEvent(self, selected, deselected): selected = [self.model().itemFromIndex(i) for i in selected.indexes()] deselected = [ self.model().itemFromIndex(i) for i in deselected.indexes() ] self.itemSelectionChanged.emit(selected, deselected) def itemDoubleClickedEvent(self, index): self.itemDoubleClicked.emit(self.model().itemFromIndex(index)) def selectedItems(self): return [self.model().itemFromIndex(i) for i in self.selectedIndexes()] def scrollToItem(self, item): self.scrollTo(self.model().indexFromItem(item)) def addItem(self, item): if not isinstance(item, LabelListWidgetItem): raise TypeError("item must be LabelListWidgetItem") self.model().setItem(self.model().rowCount(), 0, item) item.setSizeHint(self.itemDelegate().sizeHint(None, None)) def removeItem(self, item): index = self.model().indexFromItem(item) self.model().removeRows(index.row(), 1) def selectItem(self, item): index = self.model().indexFromItem(item) self.selectionModel().select(index, QtCore.QItemSelectionModel.SelectionFlag.Select) def findItemByShape(self, shape): for row in range(self.model().rowCount()): item = self.model().item(row, 0) if item.shape() == shape: return item raise ValueError("cannot find shape: {}".format(shape)) def clear(self): self.model().clear() ================================================ FILE: DLTA_AI_app/labelme/widgets/links.py ================================================ import webbrowser def open_git_hub(): """ Opens the GitHub repository for the DLTA-AI project in the default web browser. Parameters: None Returns: None """ # Open the GitHub repository in the default web browser webbrowser.open('https://github.com/0ssamaak0/DLTA-AI') def open_issue(): """ Opens the GitHub issues page for the DLTA-AI project in the default web browser. Parameters: None Returns: None """ # Open the GitHub issues page in the default web browser webbrowser.open('https://github.com/0ssamaak0/DLTA-AI/issues') def open_license(): """ Opens the license file for the DLTA-AI project in the default web browser. 
Parameters: None Returns: None """ # Open the license file in the default web browser webbrowser.open('https://github.com/0ssamaak0/DLTA-AI/blob/master/LICENSE') def open_guide(): """ Opens the guide for the DLTA-AI project in the default web browser. Parameters: None Returns: None """ # Open the guide in the default web browser webbrowser.open('https://0ssamaak0.github.io/DLTA-AI/') def open_release(link = None): """ Opens the release page for the DLTA-AI project in the default web browser. Parameters: link (str): The link to the release page. If None, the default link will be used. Returns: None """ # Import necessary modules import webbrowser # If no link was provided, use the default link if link is None: link = 'https://github.com/0ssamaak0/DLTA-AI/releases' else: link = "https://github.com/" + link # Open the release page in the default web browser webbrowser.open(link) ================================================ FILE: DLTA_AI_app/labelme/widgets/merge_feature_UI.py ================================================ import json from PyQt6 import QtWidgets from PyQt6 import QtCore # create an interface for merging features class MergeFeatureUI(): def __init__(self, parent): self.parent = parent self.selectedmodels = [] # merge dialog def mergeSegModels(self): # add a resizable and scrollable dialog that contains all the models and allow the user to select among them using checkboxes models = [] with open("saved_models.json") as json_file: data = json.load(json_file) for model in data.keys(): if "YOLOv8" not in model: models.append(model) # ExplorerMerge = ModelExplorerDialog(merge=True) # ExplorerMerge.adjustSize() # ExplorerMerge.resize( # int(ExplorerMerge.width() * 2), int(ExplorerMerge.height() * 1.5)) # ExplorerMerge.exec() dialog = QtWidgets.QDialog(self.parent) dialog.setWindowTitle('Select Models') dialog.setWindowFlags(dialog.windowFlags() & ~QtCore.Qt.WindowType.WindowContextHelpButtonHint) dialog.setWindowModality(QtCore.Qt.WindowModality.ApplicationModal) dialog.resize(200, 250) dialog.setMinimumSize(QtCore.QSize(200, 200)) verticalLayout = QtWidgets.QVBoxLayout(dialog) verticalLayout.setObjectName("verticalLayout") scrollArea = QtWidgets.QScrollArea(dialog) scrollArea.setWidgetResizable(True) scrollArea.setObjectName("scrollArea") scrollAreaWidgetContents = QtWidgets.QWidget() scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 478, 478)) scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents") verticalLayout_2 = QtWidgets.QVBoxLayout(scrollAreaWidgetContents) verticalLayout_2.setObjectName("verticalLayout_2") self.scrollAreaWidgetContents = scrollAreaWidgetContents scrollArea.setWidget(scrollAreaWidgetContents) verticalLayout.addWidget(scrollArea) buttonBox = QtWidgets.QDialogButtonBox(dialog) buttonBox.setOrientation(QtCore.Qt.Orientation.Horizontal) buttonBox.setStandardButtons( QtWidgets.QDialogButtonBox.StandardButton.Cancel | QtWidgets.QDialogButtonBox.StandardButton.Ok) buttonBox.setObjectName("buttonBox") verticalLayout.addWidget(buttonBox) buttonBox.accepted.connect(dialog.accept) buttonBox.rejected.connect(dialog.reject) self.models = [] for i in range(len(models)): self.models.append(QtWidgets.QCheckBox(models[i], dialog)) verticalLayout_2.addWidget(self.models[i]) dialog.show() dialog.exec() self.selectedmodels.clear() for i in range(len(self.models)): if self.models[i].isChecked(): self.selectedmodels.append(self.models[i].text()) print(self.selectedmodels) return self.selectedmodels ================================================ FILE: 
DLTA_AI_app/labelme/widgets/notification.py
================================================
import os


def PopUp(text):
    """
    Sends a desktop notification with the given text.

    Args:
        text (str): The text to display in the notification.

    Returns:
        None
    """
    try:
        from notifypy import Notify

        # Create a Notify object with the default title
        notification = Notify(default_notification_title="DLTA-AI")
        # Set the message of the notification to the given text
        notification.message = text
        # Set the notification icon (the path is relative to the working directory)
        notification.icon = "labelme/icons/icon.ico"
        # Send the notification asynchronously
        notification.send(block=False)
    except Exception as e:
        print(e)
        print("please install notifypy to get desktop notifications")


================================================
FILE: DLTA_AI_app/labelme/widgets/open_file.py
================================================
import os
import subprocess
import platform


def PopUp():
    """
    Open the custom exports file (labelme/utils/custom_exports.py) with the
    default application for the file type.

    Raises:
        OSError: If the file cannot be opened.

    Returns:
        None
    """
    filename = os.path.join(os.getcwd(), 'labelme/utils/custom_exports.py')
    print(filename)
    # Determine the platform and use the appropriate command to open the file
    # Windows
    if platform.system() == 'Windows':
        os.startfile(filename)
    # macOS (the argument-list form avoids problems with spaces in the path)
    elif platform.system() == 'Darwin':
        subprocess.call(['open', filename])
    # Linux and other platforms
    else:
        try:
            subprocess.call(['xdg-open', filename])
        except OSError:
            print(f"Could not open file: {filename}")


================================================
FILE: DLTA_AI_app/labelme/widgets/preferences_UI.py
================================================
import yaml
from PyQt6 import QtWidgets, QtGui, QtCore


def PopUp():
    """
    Description:
        This function displays a dialog box with preferences for the LabelMe
        application, including theme and notification settings.

    Parameters:
        This function takes no parameters.

    Returns:
        If the user clicks the OK button, this function writes the new theme and
        notification settings to the config file and returns
        `QtWidgets.QDialog.DialogCode.Accepted`. If the user clicks the Cancel
        button, this function does not write any changes to the config file and
        returns `QtWidgets.QDialog.DialogCode.Rejected`.

    Libraries:
        This function requires the following libraries to be installed:
        - yaml
        - PyQt6.QtWidgets
        - PyQt6.QtGui
        - PyQt6.QtCore
    """
    with open("labelme/config/default_config.yaml", "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)

    # Create the dialog
    dialog = QtWidgets.QDialog()
    dialog.setWindowTitle("Preferences")
    dialog.setWindowFlags(dialog.windowFlags() & ~QtCore.Qt.WindowType.WindowContextHelpButtonHint)

    # Create the labels
    themeLabel = QtWidgets.QLabel("Theme Settings 🌓")
    themeLabel.setFont(QtGui.QFont("Arial", 10, QtGui.QFont.Weight.Bold))
    theme_note_label = QtWidgets.QLabel("Requires app restart to take effect")
    notificationLabel = QtWidgets.QLabel("Notifications Settings 🔔")
    notificationLabel.setFont(QtGui.QFont("Arial", 10, QtGui.QFont.Weight.Bold))
    notification_note_label = QtWidgets.QLabel("Notifications work only for long tasks and if the app isn't focused")

    # Load the current theme and mute settings from the config file
    current_theme = config["theme"]
    current_mute = config["mute"]

    # Create the radio buttons
    autoButton = QtWidgets.QRadioButton("OS Default")
    lightButton = QtWidgets.QRadioButton("Light")
    darkButton = QtWidgets.QRadioButton("Dark")

    # Set the current theme as the default selection
    if current_theme == "auto":
        autoButton.setChecked(True)
    elif current_theme == "light":
        lightButton.setChecked(True)
    elif current_theme == "dark":
        darkButton.setChecked(True)

    # Create the images
    autoImage = QtGui.QPixmap("labelme/icons/auto-img.png").scaledToWidth(128)
    lightImage = QtGui.QPixmap("labelme/icons/light-img.png").scaledToWidth(128)
    darkImage = QtGui.QPixmap("labelme/icons/dark-img.png").scaledToWidth(128)

    # Create the image labels
    autoLabel = QtWidgets.QLabel()
    autoLabel.setPixmap(autoImage)
    lightLabel = QtWidgets.QLabel()
    lightLabel.setPixmap(lightImage)
    darkLabel = QtWidgets.QLabel()
    darkLabel.setPixmap(darkImage)

    # Create the layout
    layout = QtWidgets.QVBoxLayout()
    layout.addWidget(themeLabel)
    layout.addWidget(theme_note_label)
    buttonLayout = QtWidgets.QHBoxLayout()
    buttonLayout.addWidget(autoButton)
    buttonLayout.addWidget(lightButton)
    buttonLayout.addWidget(darkButton)
    layout.addLayout(buttonLayout)

    # Create the image layout
    imageLayout = QtWidgets.QHBoxLayout()
    imageLayout.addWidget(autoLabel)
    imageLayout.addWidget(lightLabel)
    imageLayout.addWidget(darkLabel)
    layout.addLayout(imageLayout)

    # Create the notification checkbox
    notificationCheckbox = QtWidgets.QCheckBox("Mute Notifications")
    notificationCheckbox.setChecked(current_mute)
    layout.addWidget(notificationLabel)
    layout.addWidget(notification_note_label)
    layout.addWidget(notificationCheckbox)
    dialog.setLayout(layout)

    # Create the OK and Cancel buttons
    okButton = QtWidgets.QPushButton("OK")
    cancelButton = QtWidgets.QPushButton("Cancel")

    # Add the buttons to a QHBoxLayout
    buttonLayout = QtWidgets.QHBoxLayout()
    buttonLayout.addWidget(okButton)
    buttonLayout.addWidget(cancelButton)

    # Add the QHBoxLayout to the QVBoxLayout
    layout.addLayout(buttonLayout)

    # Connect the OK and Cancel buttons to the accept and reject functions
    okButton.clicked.connect(dialog.accept)
    cancelButton.clicked.connect(dialog.reject)

    # Show the dialog
    if dialog.exec() == QtWidgets.QDialog.DialogCode.Accepted:
        # Write the new theme and notification settings to the config file
        if autoButton.isChecked():
            theme = "auto"
        elif lightButton.isChecked():
            theme = "light"
        elif darkButton.isChecked():
            theme = "dark"
        mute = notificationCheckbox.isChecked()
        with open("labelme/config/default_config.yaml", "r") as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
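        # (editor's note) the config file is deliberately re-read just above:
        # only the "theme" and "mute" keys are replaced below, so any other
        # settings changed on disk while the dialog was open are preserved
        # when the file is rewritten.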
config["theme"] = theme config["mute"] = mute with open("labelme/config/default_config.yaml", "w") as f: yaml.dump(config, f) ================================================ FILE: DLTA_AI_app/labelme/widgets/runtime_data_UI.py ================================================ from PyQt6.QtWidgets import QDialog, QLabel, QVBoxLayout from PyQt6.QtGui import QFont from PyQt6 import QtCore import psutil import torch def PopUp(): """ Description: This function displays a dialog box with information about the runtime data of the system, including GPU and RAM stats. Parameters: This function takes no parameters. Returns: This function does not return anything. Libraries: This function requires the following libraries to be installed: - PyQt6 - psutil - torch """ # Create a dialog box to display the runtime data dialog = QDialog() dialog.setWindowTitle("Runtime data") dialog.setWindowFlags(dialog.windowFlags() & ~QtCore.Qt.WindowType.WindowContextHelpButtonHint) layout = QVBoxLayout(dialog) layout.setContentsMargins(20, 20, 20, 20) layout.setSpacing(10) # Set font styles for the title and normal text title_font = QFont() title_font.setPointSize(12) title_font.setBold(True) normal_font = QFont() normal_font.setPointSize(10) # If CUDA is available, display GPU stats if torch.cuda.is_available(): device_name = torch.cuda.get_device_name(0) gpu_title_label = QLabel("Device Stats") gpu_title_label.setFont(title_font) layout.addWidget(gpu_title_label) gpu_name_label = QLabel(f"GPU Name: {device_name}") gpu_name_label.setFont(normal_font) layout.addWidget(gpu_name_label) total_vram = round(torch.cuda.get_device_properties(0).total_memory / (1024 ** 3), 2) used_vram = round(torch.cuda.memory_allocated(0) / (1024 ** 3), 2) gpu_vram_label = QLabel(f"Total GPU VRAM: {total_vram} GB\nUsed: {used_vram} GB") gpu_vram_label.setFont(normal_font) layout.addWidget(gpu_vram_label) # If CUDA is not available, display CPU stats else: cpu_label = QLabel("DLTA-AI is Using CPU") cpu_label.setFont(title_font) layout.addWidget(cpu_label) # Display RAM stats ram_title_label = QLabel("RAM Stats") ram_title_label.setFont(title_font) layout.addWidget(ram_title_label) total_ram = round(psutil.virtual_memory().total / (1024 ** 3), 2) used_ram = round(psutil.virtual_memory().used / (1024 ** 3), 2) ram_label = QLabel(f"Total RAM: {total_ram} GB\nUsed: {used_ram} GB") ram_label.setFont(normal_font) layout.addWidget(ram_label) # Display the dialog box dialog.exec() ================================================ FILE: DLTA_AI_app/labelme/widgets/scaleObject_UI.py ================================================ from PyQt6 import QtCore from PyQt6.QtCore import Qt from PyQt6 import QtWidgets from labelme.utils.helpers.mathOps import scaleQTshape def PopUp(self): """ Summary: Show a dialog to scale a shape. 
    Args:
        self: the main window object to access the canvas

    Returns:
        result: the result of the dialog
    """
    originalshape = self.canvas.selectedShapes[0].copy()
    xx = [originalshape.points[i].x() for i in range(len(originalshape.points))]
    yy = [originalshape.points[i].y() for i in range(len(originalshape.points))]
    center = [sum(xx) / len(xx), sum(yy) / len(yy)]
    dialog = QtWidgets.QDialog()
    dialog.setWindowTitle("Scaling")
    dialog.setWindowModality(Qt.WindowModality.ApplicationModal)
    dialog.resize(400, 400)
    layout = QtWidgets.QVBoxLayout()
    label = QtWidgets.QLabel("Scaling object with ID: " + str(originalshape.group_id) + "\n ")
    label.setStyleSheet("QLabel { font-weight: bold; }")
    layout.addWidget(label)
    xLabel = QtWidgets.QLabel()
    xLabel.setText("Width(x) factor is: " + "100" + "%")
    yLabel = QtWidgets.QLabel()
    yLabel.setText("Height(y) factor is: " + "100" + "%")
    xSlider = QtWidgets.QSlider(QtCore.Qt.Orientation.Horizontal)
    xSlider.setMinimum(50)
    xSlider.setMaximum(150)
    xSlider.setValue(100)
    xSlider.setTickPosition(QtWidgets.QSlider.TickPosition.TicksBelow)
    xSlider.setTickInterval(1)
    xSlider.setMaximumWidth(750)
    xSlider.valueChanged.connect(lambda: xLabel.setText(
        "Width(x) factor is: " + str(xSlider.value()) + "%"))
    xSlider.valueChanged.connect(lambda: scaleQTshape(
        self, originalshape, center, xSlider.value(), ySlider.value()))
    ySlider = QtWidgets.QSlider(QtCore.Qt.Orientation.Vertical)
    ySlider.setMinimum(50)
    ySlider.setMaximum(150)
    ySlider.setValue(100)
    ySlider.setTickPosition(QtWidgets.QSlider.TickPosition.TicksBelow)
    ySlider.setTickInterval(1)
    ySlider.setMaximumWidth(750)
    ySlider.valueChanged.connect(lambda: yLabel.setText(
        "Height(y) factor is: " + str(ySlider.value()) + "%"))
    ySlider.valueChanged.connect(lambda: scaleQTshape(
        self, originalshape, center, xSlider.value(), ySlider.value()))
    layout.addWidget(xLabel)
    layout.addWidget(yLabel)
    layout.addWidget(xSlider)
    layout.addWidget(ySlider)
    buttonBox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.StandardButton.Ok)
    buttonBox.accepted.connect(dialog.accept)
    layout.addWidget(buttonBox)
    dialog.setLayout(layout)
    result = dialog.exec()
    return result


================================================
FILE: DLTA_AI_app/labelme/widgets/segmentation_options_UI.py
================================================
# relevant imports for the functions
from PyQt6 import QtCore
from PyQt6 import QtWidgets
import yaml
from ..utils.helpers.mathOps import color_palette

coco_classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat',
                'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat',
                'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
                'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
                'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
                'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
                'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
                'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse',
                'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
                'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
                'toothbrush']

# color_palette holds 12 distinct colors (red, green, blue, yellow, cyan, magenta,
# white, black, gray, brown, pink and orange, in BGR format); it is used to draw
# bounding boxes of different classes in different colors


class SegmentationOptionsUI():
    def __init__(self, parent):
        self.parent = parent
        self.conf_threshold = 0.3
        self.iou_threshold = 0.5
        with open("labelme/config/default_config.yaml") as f:
            self.config = yaml.load(f, Loader=yaml.FullLoader)
        self.default_classes = self.config["default_classes"]
        try:
            self.selectedclasses = {}
            for class_ in self.default_classes:
                if class_ in coco_classes:
                    index = coco_classes.index(class_)
                    self.selectedclasses[index] = class_
        except Exception:
            self.selectedclasses = {i: class_ for i, class_ in enumerate(coco_classes)}
            print("error in loading the default classes from the config file, so we will use all the coco classes")

    # get the threshold as input from the user
    def setConfThreshold(self, prev_threshold=0.3):
        dialog = QtWidgets.QDialog(self.parent)
        dialog.setWindowTitle('Threshold Selector')
        dialog.setWindowFlags(dialog.windowFlags() & ~QtCore.Qt.WindowType.WindowContextHelpButtonHint)
        layout = QtWidgets.QVBoxLayout(dialog)
        label = QtWidgets.QLabel('Enter Confidence Threshold')
        layout.addWidget(label)
        slider = QtWidgets.QSlider(QtCore.Qt.Orientation.Horizontal)
        slider.setMinimum(1)
        slider.setMaximum(100)
        slider.setValue(int(prev_threshold * 100))
        text_input = QtWidgets.QLineEdit(str(prev_threshold))

        # keep the slider and the text input in sync
        def on_slider_change(value):
            text_input.setText(str(value / 100))

        def on_text_change(text):
            try:
                value = float(text)
                slider.setValue(int(value * 100))
            except ValueError:
                pass

        slider.valueChanged.connect(on_slider_change)
        text_input.textChanged.connect(on_text_change)
        layout.addWidget(slider)
        layout.addWidget(text_input)
        button_box = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.StandardButton.Ok |
                                                QtWidgets.QDialogButtonBox.StandardButton.Cancel)
        layout.addWidget(button_box)
        button_box.accepted.connect(dialog.accept)
        button_box.rejected.connect(dialog.reject)
        # the slider is the source of truth; it is already synced with the text input
        if dialog.exec() == QtWidgets.QDialog.DialogCode.Accepted:
            return slider.value() / 100
        else:
            return prev_threshold

    def setIOUThreshold(self, prev_threshold=0.5):
        dialog = QtWidgets.QDialog(self.parent)
        dialog.setWindowTitle('Threshold Selector')
        dialog.setWindowFlags(dialog.windowFlags() & ~QtCore.Qt.WindowType.WindowContextHelpButtonHint)
        layout = QtWidgets.QVBoxLayout(dialog)
        label = QtWidgets.QLabel('Enter IOU Threshold')
        layout.addWidget(label)
        slider = QtWidgets.QSlider(QtCore.Qt.Orientation.Horizontal)
        slider.setMinimum(1)
        slider.setMaximum(100)
        slider.setValue(int(prev_threshold * 100))
        text_input = QtWidgets.QLineEdit(str(prev_threshold))

        # keep the slider and the text input in sync
        def on_slider_change(value):
            text_input.setText(str(value / 100))

        def on_text_change(text):
            try:
                value = float(text)
                slider.setValue(int(value * 100))
            except ValueError:
                pass

        slider.valueChanged.connect(on_slider_change)
        text_input.textChanged.connect(on_text_change)
        layout.addWidget(slider)
        layout.addWidget(text_input)
        button_box = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.StandardButton.Ok |
                                                QtWidgets.QDialogButtonBox.StandardButton.Cancel)
        layout.addWidget(button_box)
        button_box.accepted.connect(dialog.accept)
        button_box.rejected.connect(dialog.reject)
        # the slider is the source of truth; it is already synced with the text input
        if dialog.exec() == QtWidgets.QDialog.DialogCode.Accepted:
            return slider.value() / 100
        else:
            return prev_threshold

    #
add a resizable and scrollable dialog that contains all coco classes and allow the user to select among them using checkboxes def selectClasses(self): """ Display a dialog box that allows the user to select which classes to annotate. The function creates a QDialog object and adds various widgets to it, including a QScrollArea that contains QCheckBox widgets for each class. The function sets the state of each QCheckBox based on whether the class is in the self.selectedclasses dictionary. The function also adds "Select All", "Deselect All", "Select Classes", "Set as Default", and "Cancel" buttons to the dialog box. When the user clicks the "Select Classes" button, the function saves the selected classes to the self.selectedclasses dictionary and returns it. :return: A dictionary that maps class indices to class names for the selected classes. """ # Create a new dialog box dialog = QtWidgets.QDialog(self.parent) dialog.setWindowTitle('Select Classes') dialog.setWindowModality(QtCore.Qt.WindowModality.ApplicationModal) dialog.resize(500, 500) dialog.setMinimumSize(QtCore.QSize(500, 500)) dialog.setWindowFlags(dialog.windowFlags() & ~QtCore.Qt.WindowType.WindowContextHelpButtonHint) # Create a vertical layout for the dialog box verticalLayout = QtWidgets.QVBoxLayout(dialog) verticalLayout.setObjectName("verticalLayout") # Create a horizontal layout for the "Select All" and "Deselect All" buttons horizontalLayout = QtWidgets.QHBoxLayout() selectAllButton = QtWidgets.QPushButton("Select All", dialog) deselectAllButton = QtWidgets.QPushButton("Deselect All", dialog) horizontalLayout.addWidget(selectAllButton) horizontalLayout.addWidget(deselectAllButton) verticalLayout.addLayout(horizontalLayout) # Create a scroll area for the class checkboxes scrollArea = QtWidgets.QScrollArea(dialog) scrollArea.setWidgetResizable(True) scrollArea.setObjectName("scrollArea") scrollAreaWidgetContents = QtWidgets.QWidget() scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 478, 478)) scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents") gridLayout = QtWidgets.QGridLayout(scrollAreaWidgetContents) gridLayout.setObjectName("gridLayout") self.scrollAreaWidgetContents = scrollAreaWidgetContents scrollArea.setWidget(scrollAreaWidgetContents) verticalLayout.addWidget(scrollArea) # Create a button box for the "Select Classes", "Set as Default", and "Cancel" buttons buttonBox = QtWidgets.QDialogButtonBox(dialog) buttonBox.setOrientation(QtCore.Qt.Orientation.Horizontal) buttonBox.setStandardButtons( QtWidgets.QDialogButtonBox.StandardButton.Cancel | QtWidgets.QDialogButtonBox.StandardButton.Ok) buttonBox.setObjectName("buttonBox") buttonBox.button(QtWidgets.QDialogButtonBox.StandardButton.Ok).setText("Select Classes") defaultButton = QtWidgets.QPushButton("Set as Default", dialog) buttonBox.addButton(defaultButton, QtWidgets.QDialogButtonBox.ButtonRole.ActionRole) # Add the buttons to a QHBoxLayout buttonLayout = QtWidgets.QHBoxLayout() buttonLayout.addWidget(buttonBox.button(QtWidgets.QDialogButtonBox.StandardButton.Ok)) buttonLayout.addWidget(defaultButton) buttonLayout.addWidget(buttonBox.button(QtWidgets.QDialogButtonBox.StandardButton.Cancel)) # Add the QHBoxLayout to the QVBoxLayout verticalLayout.addLayout(buttonLayout) # Connect the button signals to their respective slots buttonBox.accepted.connect(lambda: self.saveClasses(dialog)) buttonBox.rejected.connect(dialog.reject) defaultButton.clicked.connect(lambda: self.saveClasses(dialog, True)) # Create a QCheckBox for each class and add it to the 
grid layout self.classes = [] for i in range(len(coco_classes)): self.classes.append(QtWidgets.QCheckBox(coco_classes[i], dialog)) row = i // 3 col = i % 3 gridLayout.addWidget(self.classes[i], row, col) # Set the state of each QCheckBox based on whether the class is in the self.selectedclasses dictionary for value in self.selectedclasses.values(): if value != None: indx = coco_classes.index(value) self.classes[indx].setChecked(True) # Connect the "Select All" and "Deselect All" buttons to their respective slots selectAllButton.clicked.connect(lambda: self.selectAll()) deselectAllButton.clicked.connect(lambda: self.deselectAll()) # Show the dialog box and wait for the user to close it dialog.show() dialog.exec() # Save the selected classes to the self.selectedclasses dictionary and return it self.selectedclasses.clear() for i in range(len(self.classes)): if self.classes[i].isChecked(): indx = coco_classes.index(self.classes[i].text()) self.selectedclasses[indx] = self.classes[i].text() return self.selectedclasses def saveClasses(self, dialog, is_default=False): """ Save the selected classes to the self.selectedclasses dictionary. The function clears the self.selectedclasses dictionary and then iterates over the QCheckBox widgets for each class. If a QCheckBox is checked, the function adds the corresponding class name to the self.selectedclasses dictionary. If the is_default parameter is True, the function also updates the default_config.yaml file with the selected classes. :param dialog: The QDialog object that contains the class selection dialog. :param is_default: A boolean that indicates whether to update the default_config.yaml file with the selected classes. """ # Clear the self.selectedclasses dictionary self.selectedclasses.clear() # Iterate over the QCheckBox widgets for each class for i in range(len(self.classes)): if self.classes[i].isChecked(): indx = coco_classes.index(self.classes[i].text()) self.selectedclasses[indx] = self.classes[i].text() # If is_default is True, update the default_config.yaml file with the selected classes if is_default: with open("labelme/config/default_config.yaml", 'r') as f: config = yaml.load(f, Loader=yaml.FullLoader) config['default_classes'] = list(self.selectedclasses.values()) with open("labelme/config/default_config.yaml", 'w') as f: yaml.dump(config, f) # Accept the dialog box dialog.accept() def selectAll(self): """ Select all classes in the class selection dialog. The function iterates over the QCheckBox widgets for each class and sets their checked state to True. """ # Iterate over the QCheckBox widgets for each class and set their checked state to True for checkbox in self.classes: checkbox.setChecked(True) def deselectAll(self): """ Deselect all classes in the class selection dialog. The function iterates over the QCheckBox widgets for each class and sets their checked state to False. """ # Iterate over the QCheckBox widgets for each class and set their checked state to False for checkbox in self.classes: checkbox.setChecked(False) ================================================ FILE: DLTA_AI_app/labelme/widgets/shortcut_selector_UI.py ================================================ import yaml from PyQt6 import QtWidgets, QtGui, QtCore def PopUp(): """ Displays a dialog box for selecting and editing keyboard shortcuts for the application. 
    # Create a table to display the shortcuts
    shortcut_table = QtWidgets.QTableWidget()
    shortcut_table.setColumnCount(2)
    shortcut_table.setHorizontalHeaderLabels(['Function', 'Shortcut'])
    shortcut_table.setRowCount(len(shortcuts))
    shortcut_table.verticalHeader().setVisible(False)

    # Populate the table with the shortcut names and keys
    row = 0
    for name, key in shortcuts.items():
        name_item = QtWidgets.QTableWidgetItem(name)
        # a shortcut may be stored as null in the config, so guard against None
        shortcut_item = QtWidgets.QTableWidgetItem(key or "")
        shortcut_table.setItem(row, 0, name_item)
        shortcut_table.setItem(row, 1, shortcut_item)
        row += 1

    # Define a function to handle clicks on the shortcut table
    def on_shortcut_table_clicked(item):
        row = item.row()
        name_item = shortcut_table.item(row, 0)
        name = name_item.text()
        current_key = shortcuts[name]
        key_edit = QtWidgets.QKeySequenceEdit(QtGui.QKeySequence(current_key or ""))
        key_edit.setWindowTitle(f"Edit Shortcut for {name}")
        key_edit_label = QtWidgets.QLabel("Enter new shortcut for " + name)
        dialog = QtWidgets.QDialog()
        dialog.setWindowFlags(dialog.windowFlags() & ~QtCore.Qt.WindowType.WindowContextHelpButtonHint)
        dialog.setWindowTitle("Shortcut Selector")
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(key_edit_label)
        layout.addWidget(key_edit)
        ok_button = QtWidgets.QPushButton("OK")
        ok_button.clicked.connect(dialog.accept)
        null_hint_label = QtWidgets.QLabel("To remove a shortcut, press 'Ctrl' only, then click 'OK'.")
        layout.addWidget(ok_button)
        layout.addWidget(null_hint_label)
        dialog.setLayout(layout)

        # If the user clicks OK, update the shortcut and table
        if dialog.exec():
            key = key_edit.keySequence().toString(QtGui.QKeySequence.SequenceFormat.NativeText)
            if key in shortcuts.values() and list(shortcuts.keys())[list(shortcuts.values()).index(key)] != name:
                conflicting_shortcut = list(shortcuts.keys())[list(shortcuts.values()).index(key)]
                QtWidgets.QMessageBox.warning(None, "Error", f"{key} is already assigned to {conflicting_shortcut}.")
            else:
                if key == "":
                    key = None
                shortcuts[name] = key
                # setText() expects a str, so guard against the cleared (None) case
                shortcut_table.item(row, 1).setText(key or "")

    def write_shortcuts_to_ui(config):
        shortcuts = config.get("shortcuts", {})
        # Encode the shortcut names for display in the table
        shortcuts_names_encode = {
            name: name.lower().capitalize().replace("_", " ").replace("Sam", "SAM").replace("sam", "SAM")
            for name in shortcuts.keys()
        }
        # Change the keys of the shortcuts dictionary to use the encoded names
        shortcuts = {shortcuts_names_encode[key]: value for key, value in shortcuts.items()}
        row = 0
        for name, key in shortcuts.items():
            name_item = QtWidgets.QTableWidgetItem(name)
            shortcut_item = QtWidgets.QTableWidgetItem(key or "")
            shortcut_table.setItem(row, 0, name_item)
            shortcut_table.setItem(row, 1, shortcut_item)
            row += 1
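    # Note: "Reset" below re-reads the last saved shortcuts from
    # default_config.yaml, while "Restore" copies the factory defaults from
    # default_config_base.yaml; both repopulate the table via
    # write_shortcuts_to_ui().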
    def on_reset_button_clicked():
        with open("labelme/config/default_config.yaml", "r") as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
        write_shortcuts_to_ui(config)

    def on_restore_button_clicked():
        with open("labelme/config/default_config_base.yaml", "r") as f:
            configBase = yaml.load(f, Loader=yaml.FullLoader)
        with open("labelme/config/default_config.yaml", "r") as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
        config["shortcuts"] = configBase["shortcuts"]
        write_shortcuts_to_ui(config)

    # Connect the on_shortcut_table_clicked function to the itemClicked signal of the shortcut table
    shortcut_table.itemClicked.connect(on_shortcut_table_clicked)

    # Create a dialog box to display the shortcut table
    dialog = QtWidgets.QDialog()
    dialog.setWindowFlags(dialog.windowFlags() & ~QtCore.Qt.WindowType.WindowContextHelpButtonHint)
    dialog.setWindowTitle("Shortcuts")
    layout = QtWidgets.QVBoxLayout()
    layout.addWidget(shortcut_table)
    ok_button = QtWidgets.QPushButton("OK")
    ok_button.clicked.connect(dialog.accept)
    layout.addWidget(ok_button)
    reset_button = QtWidgets.QPushButton("Reset")
    reset_button.clicked.connect(on_reset_button_clicked)
    layout.addWidget(reset_button)
    restore_button = QtWidgets.QPushButton("Restore Default Shortcuts")
    restore_button.clicked.connect(on_restore_button_clicked)
    layout.addWidget(restore_button)
    note_label = QtWidgets.QLabel("Shortcuts will be updated after restarting the app.")
    layout.addWidget(note_label)
    dialog.setLayout(layout)

    # Set the size of the dialog box
    dialog.setMinimumWidth(shortcut_table.sizeHintForColumn(0) + shortcut_table.sizeHintForColumn(1) + 55)
    dialog.setMinimumHeight(shortcut_table.rowHeight(0) * 10 + 50)
    # Set the size policy to allow vertical resizing
    dialog.setSizePolicy(QtWidgets.QSizePolicy.Policy.Fixed, QtWidgets.QSizePolicy.Policy.Expanding)

    # Display the dialog box
    dialog.exec()

    # Load shortcuts from the shortcut table to be updated
    shortcuts = {}
    for row in range(shortcut_table.rowCount()):
        name_item = shortcut_table.item(row, 0)
        name = name_item.text()
        shortcut_item = shortcut_table.item(row, 1)
        shortcut = shortcut_item.text()
        shortcuts[name] = shortcut if shortcut != "" else None

    # Decode the shortcut names back to their original form
    shortcuts = {shortcuts_names_decode[key]: value for key, value in shortcuts.items()}

    # Write the updated shortcuts to the config file
    with open("labelme/config/default_config.yaml", "w") as f:
        config["shortcuts"] = shortcuts
        yaml.dump(config, f)
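
# A minimal, self-contained sketch (an assumption, not part of the original
# file) of the display-name round trip used above; the key names shown are
# hypothetical examples, not actual config keys:
def _encode_shortcut_name(name):
    # "open_video" -> "Open video", "sam_model" -> "SAM model"
    return name.lower().capitalize().replace("_", " ").replace("Sam", "SAM").replace("sam", "SAM")

assert _encode_shortcut_name("sam_model") == "SAM model"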


================================================
FILE: DLTA_AI_app/labelme/widgets/tool_bar.py
================================================
from PyQt6 import QtCore
from PyQt6 import QtWidgets


class ToolBar(QtWidgets.QToolBar):
    def __init__(self, title):
        super(ToolBar, self).__init__(title)
        layout = self.layout()
        m = (0, 0, 0, 0)
        layout.setSpacing(0)
        layout.setContentsMargins(*m)
        self.setContentsMargins(*m)
        self.setWindowFlags(self.windowFlags() | QtCore.Qt.WindowType.FramelessWindowHint)

    def addAction(self, action):
        if isinstance(action, QtWidgets.QWidgetAction):
            return super(ToolBar, self).addAction(action)
        btn = QtWidgets.QToolButton()
        btn.setDefaultAction(action)
        btn.setToolButtonStyle(self.toolButtonStyle())
        self.addWidget(btn)
        # center align
        for i in range(self.layout().count()):
            if isinstance(self.layout().itemAt(i).widget(), QtWidgets.QToolButton):
                self.layout().itemAt(i).setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)


================================================
FILE: DLTA_AI_app/labelme/widgets/unique_label_qlist_widget.py
================================================
# -*- encoding: utf-8 -*-

from PyQt6.QtCore import Qt
from PyQt6 import QtWidgets

from .escapable_qlist_widget import EscapableQListWidget


class UniqueLabelQListWidget(EscapableQListWidget):
    def mousePressEvent(self, event):
        super(UniqueLabelQListWidget, self).mousePressEvent(event)
        if not self.indexAt(event.pos()).isValid():
            self.clearSelection()

    def findItemsByLabel(self, label):
        items = []
        for row in range(self.count()):
            item = self.item(row)
            if item.data(Qt.ItemDataRole.UserRole) == label:
                items.append(item)
        return items

    def createItemFromLabel(self, label):
        item = QtWidgets.QListWidgetItem()
        item.setData(Qt.ItemDataRole.UserRole, label)
        return item

    def setItemLabel(self, item, label, color=None):
        qlabel = QtWidgets.QLabel()
        if color is None:
            qlabel.setText("{}".format(label))
        else:
            # show the label followed by a dot tinted with the label color
            qlabel.setText(
                '{} <font color="#{:02x}{:02x}{:02x}">●</font>'.format(label, *color)
            )
        qlabel.setAlignment(Qt.AlignmentFlag.AlignBottom)
        item.setSizeHint(qlabel.sizeHint())
        self.setItemWidget(item, qlabel)


================================================
FILE: DLTA_AI_app/labelme/widgets/zoom_widget.py
================================================
from PyQt6 import QtCore
from PyQt6 import QtGui
from PyQt6 import QtWidgets


class ZoomWidget(QtWidgets.QSpinBox):
    def __init__(self, value=100):
        super(ZoomWidget, self).__init__()
        self.setButtonSymbols(QtWidgets.QAbstractSpinBox.ButtonSymbols.NoButtons)
        self.setRange(1, 1000)
        self.setSuffix(" %")
        self.setValue(value)
        self.setToolTip("Zoom Level")
        self.setStatusTip(self.toolTip())
        self.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)

    def minimumSizeHint(self):
        height = super(ZoomWidget, self).minimumSizeHint().height()
        fm = QtGui.QFontMetrics(self.font())
        # QFontMetrics.width() was removed in Qt 6; horizontalAdvance() is the replacement
        width = fm.horizontalAdvance(str(self.maximum()))
        return QtCore.QSize(width, height)
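
# A minimal usage sketch (an assumption, not part of the original file): the
# widget stores the zoom as a plain percentage, so the canvas scale factor is
# a division by 100.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    zoom = ZoomWidget(value=150)
    scale = zoom.value() / 100.0  # 1.5x
    print(scale)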


================================================
FILE: DLTA_AI_app/mmdetection/.circleci/config.yml
================================================
version: 2.1

jobs:
  lint:
    docker:
      - image: cimg/python:3.7.4
    steps:
      - checkout
      - run:
          name: Install pre-commit hook
          command: |
            pip install pre-commit
            pre-commit install
      - run:
          name: Linting
          command: pre-commit run --all-files
      - run:
          name: Check docstring coverage
          command: |
            pip install interrogate
            interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 50 mmdet
  build_cpu:
    parameters:
      # The python version must match available image tags in
      # https://circleci.com/developer/images/image/cimg/python
      python:
        type: string
        default: "3.7.4"
      torch:
        type: string
      torchvision:
        type: string
    docker:
      - image: cimg/python:<< parameters.python >>
    resource_class: large
    steps:
      - checkout
      - run:
          name: Install Libraries
          command: |
            sudo apt-get update
            sudo apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx libjpeg-dev zlib1g-dev libtinfo-dev libncurses5
      - run:
          name: Configure Python & pip
          command: |
            pip install --upgrade pip
            pip install wheel
      - run:
          name: Install PyTorch
          command: |
            python -V
            pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html
      - when:
          condition:
            equal: [ "3.9.0", << parameters.python >> ]
          steps:
            - run: pip install "protobuf <= 3.20.1" && sudo apt-get update && sudo apt-get -y install libprotobuf-dev protobuf-compiler cmake
      - run:
          name: Install mmdet dependencies
          command: |
            pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch<< parameters.torch >>/index.html
            pip install -r requirements/tests.txt -r requirements/optional.txt
            pip install albumentations>=0.3.2 --no-binary imgaug,albumentations
            pip install git+https://github.com/cocodataset/panopticapi.git
      - run:
          name: Build and install
          command: |
            pip install -e .
      - run:
          name: Run unittests
          command: |
            coverage run --branch --source mmdet -m pytest tests/
            coverage xml
            coverage report -m
  build_cu101:
    machine:
      image: ubuntu-1604-cuda-10.1:201909-23
    resource_class: gpu.nvidia.small
    steps:
      - checkout
      - run:
          name: Install Libraries
          command: |
            sudo apt-get update
            sudo apt-get install -y git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx
      - run:
          name: Configure Python & pip
          command: |
            pyenv global 3.7.0
            pip install --upgrade pip
            pip install wheel
      - run:
          name: Install PyTorch
          command: |
            python -V
            pip install torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html
      - run:
          name: Install mmdet dependencies
          # pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/torch${{matrix.torch_version}}/index.html
          command: |
            pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/torch1.6.0/index.html
            pip install -r requirements/tests.txt -r requirements/optional.txt
            pip install pycocotools
            pip install albumentations>=0.3.2 --no-binary imgaug,albumentations
            pip install git+https://github.com/cocodataset/panopticapi.git
            python -c 'import mmcv; print(mmcv.__version__)'
      - run:
          name: Build and install
          command: |
            python setup.py check -m -s
            TORCH_CUDA_ARCH_LIST=7.0 pip install -e .
      - run:
          name: Run unittests
          command: |
            pytest tests/

workflows:
  unit_tests:
    jobs:
      - lint
      - build_cpu:
          name: build_cpu_th1.6
          torch: 1.6.0
          torchvision: 0.7.0
          requires:
            - lint
      - build_cpu:
          name: build_cpu_th1.7
          torch: 1.7.0
          torchvision: 0.8.1
          requires:
            - lint
      - build_cpu:
          name: build_cpu_th1.8_py3.9
          torch: 1.8.0
          torchvision: 0.9.0
          python: "3.9.0"
          requires:
            - lint
      - build_cpu:
          name: build_cpu_th1.9_py3.8
          torch: 1.9.0
          torchvision: 0.10.0
          python: "3.8.12"
          requires:
            - lint
      - build_cpu:
          name: build_cpu_th1.9_py3.9
          torch: 1.9.0
          torchvision: 0.10.0
          python: "3.9.0"
          requires:
            - lint
      - build_cu101:
          requires:
            - build_cpu_th1.6
            - build_cpu_th1.7
            - build_cpu_th1.8_py3.9
            - build_cpu_th1.9_py3.8
            - build_cpu_th1.9_py3.9


================================================
FILE: DLTA_AI_app/mmdetection/.dev_scripts/batch_test_list.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
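# Each entry below follows the same schema (illustrative sketch only;
# `example_model` is a hypothetical name, not one of the benchmarked models):
# a config path, a checkpoint filename resolved against a checkpoint root,
# the eval mode(s), and the expected metric values to compare against.
# example_model = dict(
#     config='configs/<model>/<variant>_coco.py',
#     checkpoint='<variant>_coco-<hash>.pth',
#     eval='bbox',                      # or a list such as ['bbox', 'segm']
#     metric=dict(bbox_mAP=0.0),
# )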
# yapf: disable atss = dict( config='configs/atss/atss_r50_fpn_1x_coco.py', checkpoint='atss_r50_fpn_1x_coco_20200209-985f7bd0.pth', eval='bbox', metric=dict(bbox_mAP=39.4), ) autoassign = dict( config='configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py', checkpoint='auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth', eval='bbox', metric=dict(bbox_mAP=40.4), ) carafe = dict( config='configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py', checkpoint='faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth', # noqa eval='bbox', metric=dict(bbox_mAP=38.6), ) cascade_rcnn = [ dict( config='configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py', checkpoint='cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth', eval='bbox', metric=dict(bbox_mAP=40.3), ), dict( config='configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py', checkpoint='cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth', eval=['bbox', 'segm'], metric=dict(bbox_mAP=41.2, segm_mAP=35.9), ), ] cascade_rpn = dict( config='configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py', checkpoint='crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth', eval='bbox', metric=dict(bbox_mAP=40.4), ) centripetalnet = dict( config='configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py', # noqa checkpoint='centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth', # noqa eval='bbox', metric=dict(bbox_mAP=44.7), ) cornernet = dict( config='configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py', checkpoint='cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth', # noqa eval='bbox', metric=dict(bbox_mAP=41.2), ) dcn = dict( config='configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py', checkpoint='faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth', eval='bbox', metric=dict(bbox_mAP=41.3), ) deformable_detr = dict( config='configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py', checkpoint='deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth', # noqa eval='bbox', metric=dict(bbox_mAP=44.5), ) detectors = dict( config='configs/detectors/detectors_htc_r50_1x_coco.py', checkpoint='detectors_htc_r50_1x_coco-329b1453.pth', eval=['bbox', 'segm'], metric=dict(bbox_mAP=49.1, segm_mAP=42.6), ) detr = dict( config='configs/detr/detr_r50_8x2_150e_coco.py', checkpoint='detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth', eval='bbox', metric=dict(bbox_mAP=40.1), ) double_heads = dict( config='configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py', checkpoint='dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth', eval='bbox', metric=dict(bbox_mAP=40.0), ) dynamic_rcnn = dict( config='configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py', checkpoint='dynamic_rcnn_r50_fpn_1x-62a3f276.pth', eval='bbox', metric=dict(bbox_mAP=38.9), ) empirical_attention = dict( config='configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py', # noqa checkpoint='faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth', # noqa eval='bbox', metric=dict(bbox_mAP=40.0), ) faster_rcnn = dict( config='configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py', checkpoint='faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth', eval='bbox', metric=dict(bbox_mAP=37.4), ) fcos = dict( config='configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py', # noqa checkpoint='fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth', # noqa eval='bbox', metric=dict(bbox_mAP=38.7), ) 
foveabox = dict( config='configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py', checkpoint='fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth', eval='bbox', metric=dict(bbox_mAP=37.9), ) free_anchor = dict( config='configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py', checkpoint='retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth', eval='bbox', metric=dict(bbox_mAP=38.7), ) fsaf = dict( config='configs/fsaf/fsaf_r50_fpn_1x_coco.py', checkpoint='fsaf_r50_fpn_1x_coco-94ccc51f.pth', eval='bbox', metric=dict(bbox_mAP=37.4), ) gcnet = dict( config='configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py', # noqa checkpoint='mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth', # noqa eval=['bbox', 'segm'], metric=dict(bbox_mAP=40.4, segm_mAP=36.2), ) gfl = dict( config='configs/gfl/gfl_r50_fpn_1x_coco.py', checkpoint='gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth', eval='bbox', metric=dict(bbox_mAP=40.2), ) gn = dict( config='configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py', checkpoint='mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth', eval=['bbox', 'segm'], metric=dict(bbox_mAP=40.1, segm_mAP=36.4), ) gn_ws = dict( config='configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py', checkpoint='faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth', eval='bbox', metric=dict(bbox_mAP=39.7), ) grid_rcnn = dict( config='configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py', checkpoint='grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth', eval='bbox', metric=dict(bbox_mAP=40.4), ) groie = dict( config='configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py', checkpoint='faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth', # noqa eval='bbox', metric=dict(bbox_mAP=38.3), ) guided_anchoring = [ dict( config='configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py', # noqa checkpoint='ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth', eval='bbox', metric=dict(bbox_mAP=36.9), ), dict( config='configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py', checkpoint='ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth', # noqa eval='bbox', metric=dict(bbox_mAP=39.6), ), ] hrnet = dict( config='configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py', checkpoint='faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth', eval='bbox', metric=dict(bbox_mAP=36.9), ) htc = dict( config='configs/htc/htc_r50_fpn_1x_coco.py', checkpoint='htc_r50_fpn_1x_coco_20200317-7332cf16.pth', eval=['bbox', 'segm'], metric=dict(bbox_mAP=42.3, segm_mAP=37.4), ) libra_rcnn = dict( config='configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py', checkpoint='libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth', eval='bbox', metric=dict(bbox_mAP=38.3), ) mask_rcnn = dict( config='configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py', checkpoint='mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth', eval=['bbox', 'segm'], metric=dict(bbox_mAP=38.2, segm_mAP=34.7), ) ms_rcnn = dict( config='configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py', checkpoint='ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth', eval=['bbox', 'segm'], metric=dict(bbox_mAP=38.2, segm_mAP=36.0), ) nas_fcos = dict( config='configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py', # noqa checkpoint='nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth', # noqa eval='bbox', metric=dict(bbox_mAP=39.4), ) nas_fpn = dict( config='configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py', 
checkpoint='retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth', eval='bbox', metric=dict(bbox_mAP=40.5), ) paa = dict( config='configs/paa/paa_r50_fpn_1x_coco.py', checkpoint='paa_r50_fpn_1x_coco_20200821-936edec3.pth', eval='bbox', metric=dict(bbox_mAP=40.4), ) pafpn = dict( config='configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py', checkpoint='faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth', # noqa eval='bbox', metric=dict(bbox_mAP=37.5), ) pisa = dict( config='configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py', checkpoint='pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth', eval='bbox', metric=dict(bbox_mAP=38.4), ) point_rend = dict( config='configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py', checkpoint='point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth', eval=['bbox', 'segm'], metric=dict(bbox_mAP=38.4, segm_mAP=36.3), ) regnet = dict( config='configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py', checkpoint='mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth', # noqa eval=['bbox', 'segm'], metric=dict(bbox_mAP=40.4, segm_mAP=36.7), ) reppoints = dict( config='configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py', checkpoint='reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth', eval='bbox', metric=dict(bbox_mAP=37.0), ) res2net = dict( config='configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py', checkpoint='faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth', eval='bbox', metric=dict(bbox_mAP=43.0), ) resnest = dict( config='configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py', # noqa checkpoint='faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth', # noqa eval='bbox', metric=dict(bbox_mAP=42.0), ) retinanet = dict( config='configs/retinanet/retinanet_r50_fpn_1x_coco.py', checkpoint='retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth', eval='bbox', metric=dict(bbox_mAP=36.5), ) rpn = dict( config='configs/rpn/rpn_r50_fpn_1x_coco.py', checkpoint='rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth', eval='proposal_fast', metric=dict(AR_1000=58.2), ) sabl = [ dict( config='configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py', checkpoint='sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth', eval='bbox', metric=dict(bbox_mAP=37.7), ), dict( config='configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py', checkpoint='sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth', eval='bbox', metric=dict(bbox_mAP=39.9), ), ] scnet = dict( config='configs/scnet/scnet_r50_fpn_1x_coco.py', checkpoint='scnet_r50_fpn_1x_coco-c3f09857.pth', eval='bbox', metric=dict(bbox_mAP=43.5), ) sparse_rcnn = dict( config='configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py', checkpoint='sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth', eval='bbox', metric=dict(bbox_mAP=37.9), ) ssd = [ dict( config='configs/ssd/ssd300_coco.py', checkpoint='ssd300_coco_20210803_015428-d231a06e.pth', eval='bbox', metric=dict(bbox_mAP=25.5), ), dict( config='configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py', checkpoint='ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth',# noqa eval='bbox', metric=dict(bbox_mAP=21.3), ), ] tridentnet = dict( config='configs/tridentnet/tridentnet_r50_caffe_1x_coco.py', checkpoint='tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth', eval='bbox', metric=dict(bbox_mAP=37.6), ) vfnet = dict( config='configs/vfnet/vfnet_r50_fpn_1x_coco.py', checkpoint='vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth', eval='bbox', metric=dict(bbox_mAP=41.6), ) yolact = dict( 
config='configs/yolact/yolact_r50_1x8_coco.py', checkpoint='yolact_r50_1x8_coco_20200908-f38d58df.pth', eval=['bbox', 'segm'], metric=dict(bbox_mAP=31.2, segm_mAP=29.0), ) yolo = dict( config='configs/yolo/yolov3_d53_320_273e_coco.py', checkpoint='yolov3_d53_320_273e_coco-421362b6.pth', eval='bbox', metric=dict(bbox_mAP=27.9), ) yolof = dict( config='configs/yolof/yolof_r50_c5_8x8_1x_coco.py', checkpoint='yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth', eval='bbox', metric=dict(bbox_mAP=37.5), ) centernet = dict( config='configs/centernet/centernet_resnet18_dcnv2_140e_coco.py', checkpoint='centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth', # noqa eval='bbox', metric=dict(bbox_mAP=29.5), ) yolox = dict( config='configs/yolox/yolox_tiny_8x8_300e_coco.py', checkpoint='yolox_tiny_8x8_300e_coco_20210806_234250-4ff3b67e.pth', # noqa eval='bbox', metric=dict(bbox_mAP=31.5), ) # yapf: enable ================================================ FILE: DLTA_AI_app/mmdetection/.dev_scripts/batch_train_list.txt ================================================ configs/atss/atss_r50_fpn_1x_coco.py configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py configs/detectors/detectors_htc_r50_1x_coco.py configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py configs/detr/detr_r50_8x2_150e_coco.py configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py configs/fsaf/fsaf_r50_fpn_1x_coco.py configs/gfl/gfl_r50_fpn_1x_coco.py configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py configs/htc/htc_r50_fpn_1x_coco.py configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py configs/paa/paa_r50_fpn_1x_coco.py configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py configs/rpn/rpn_r50_fpn_1x_coco.py configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py configs/ssd/ssd300_coco.py configs/tridentnet/tridentnet_r50_caffe_1x_coco.py configs/vfnet/vfnet_r50_fpn_1x_coco.py configs/yolact/yolact_r50_8x8_coco.py configs/yolo/yolov3_d53_320_273e_coco.py configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py configs/scnet/scnet_r50_fpn_1x_coco.py configs/yolof/yolof_r50_c5_8x8_1x_coco.py configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py 
configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py configs/centernet/centernet_resnet18_dcnv2_140e_coco.py configs/yolox/yolox_tiny_8x8_300e_coco.py configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py ================================================ FILE: DLTA_AI_app/mmdetection/.dev_scripts/benchmark_filter.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import argparse import os import os.path as osp def parse_args(): parser = argparse.ArgumentParser(description='Filter configs to train') parser.add_argument( '--basic-arch', action='store_true', help='to train models in basic arch') parser.add_argument( '--datasets', action='store_true', help='to train models in dataset') parser.add_argument( '--data-pipeline', action='store_true', help='to train models related to data pipeline, e.g. augmentations') parser.add_argument( '--nn-module', action='store_true', help='to train models related to neural network modules') parser.add_argument( '--model-options', nargs='+', help='custom options to special model benchmark') parser.add_argument( '--out', type=str, default='batch_train_list.txt', help='output path of gathered metrics to be stored') args = parser.parse_args() return args basic_arch_root = [ 'atss', 'autoassign', 'cascade_rcnn', 'cascade_rpn', 'centripetalnet', 'cornernet', 'detectors', 'deformable_detr', 'detr', 'double_heads', 'dynamic_rcnn', 'faster_rcnn', 'fcos', 'foveabox', 'fp16', 'free_anchor', 'fsaf', 'gfl', 'ghm', 'grid_rcnn', 'guided_anchoring', 'htc', 'ld', 'libra_rcnn', 'mask_rcnn', 'ms_rcnn', 'nas_fcos', 'paa', 'pisa', 'point_rend', 'reppoints', 'retinanet', 'rpn', 'sabl', 'ssd', 'tridentnet', 'vfnet', 'yolact', 'yolo', 'sparse_rcnn', 'scnet', 'yolof', 'centernet' ] datasets_root = [ 'wider_face', 'pascal_voc', 'cityscapes', 'lvis', 'deepfashion' ] data_pipeline_root = ['albu_example', 'instaboost'] nn_module_root = [ 'carafe', 'dcn', 'empirical_attention', 'gcnet', 'gn', 'gn+ws', 'hrnet', 'pafpn', 'nas_fpn', 'regnet', 'resnest', 'res2net', 'groie' ] benchmark_pool = [ 'configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py', 'configs/atss/atss_r50_fpn_1x_coco.py', 'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py', 'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py', 'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py', 'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py', 'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py', 'configs/centripetalnet/' 'centripetalnet_hourglass104_mstest_16x6_210e_coco.py', 'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py', 'configs/cornernet/' 'cornernet_hourglass104_mstest_8x6_210e_coco.py', 'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py', 
'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py', 'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py', 'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py', 'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py', 'configs/detectors/detectors_htc_r50_1x_coco.py', 'configs/detr/detr_r50_8x2_150e_coco.py', 'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py', 'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py', 'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py', # noqa 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py', 'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py', 'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py', 'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py', 'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py', 'configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py', 'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py', 'configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py', 'configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py', 'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py', 'configs/fsaf/fsaf_r50_fpn_1x_coco.py', 'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py', 'configs/gfl/gfl_r50_fpn_1x_coco.py', 'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py', 'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py', 'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py', 'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py', 'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py', 'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py', 'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py', 'configs/htc/htc_r50_fpn_1x_coco.py', 'configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py', 'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py', 'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py', 'configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py', 'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py', 'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py', 'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py', 'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py', 'configs/paa/paa_r50_fpn_1x_coco.py', 'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py', 'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py', 'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py', 'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py', 'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py', 'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py', 'configs/resnest/' 'mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py', 'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py', 'configs/rpn/rpn_r50_fpn_1x_coco.py', 'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py', 'configs/ssd/ssd300_coco.py', 'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py', 'configs/vfnet/vfnet_r50_fpn_1x_coco.py', 'configs/yolact/yolact_r50_1x8_coco.py', 'configs/yolo/yolov3_d53_320_273e_coco.py', 'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py', 'configs/scnet/scnet_r50_fpn_1x_coco.py', 'configs/yolof/yolof_r50_c5_8x8_1x_coco.py', ] def main(): args = parse_args() benchmark_type = [] if args.basic_arch: benchmark_type += basic_arch_root if args.datasets: benchmark_type += datasets_root if args.data_pipeline: benchmark_type += data_pipeline_root if args.nn_module: benchmark_type += nn_module_root special_model = args.model_options if special_model is not None: benchmark_type += special_model config_dpath = 'configs/' benchmark_configs = [] for 
cfg_root in benchmark_type: cfg_dir = osp.join(config_dpath, cfg_root) configs = os.scandir(cfg_dir) for cfg in configs: config_path = osp.join(cfg_dir, cfg.name) if (config_path in benchmark_pool and config_path not in benchmark_configs): benchmark_configs.append(config_path) print(f'Totally found {len(benchmark_configs)} configs to benchmark') with open(args.out, 'w') as f: for config in benchmark_configs: f.write(config + '\n') if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/.dev_scripts/benchmark_inference_fps.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import argparse import os import os.path as osp import mmcv from mmcv import Config, DictAction from mmcv.runner import init_dist from terminaltables import GithubFlavoredMarkdownTable from tools.analysis_tools.benchmark import repeat_measure_inference_speed def parse_args(): parser = argparse.ArgumentParser( description='MMDet benchmark a model of FPS') parser.add_argument('config', help='test config file path') parser.add_argument('checkpoint_root', help='Checkpoint file root path') parser.add_argument( '--round-num', type=int, default=1, help='round a number to a given precision in decimal digits') parser.add_argument( '--repeat-num', type=int, default=1, help='number of repeat times of measurement for averaging the results') parser.add_argument( '--out', type=str, help='output path of gathered fps to be stored') parser.add_argument( '--max-iter', type=int, default=2000, help='num of max iter') parser.add_argument( '--log-interval', type=int, default=50, help='interval of logging') parser.add_argument( '--fuse-conv-bn', action='store_true', help='Whether to fuse conv and bn, this will slightly increase' 'the inference speed') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file. If the value to ' 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' 'Note that the quotation marks are necessary and that no white space ' 'is allowed.') parser.add_argument( '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher') parser.add_argument('--local_rank', type=int, default=0) args = parser.parse_args() if 'LOCAL_RANK' not in os.environ: os.environ['LOCAL_RANK'] = str(args.local_rank) return args def results2markdown(result_dict): table_data = [] is_multiple_results = False for cfg_name, value in result_dict.items(): name = cfg_name.replace('configs/', '') fps = value['fps'] ms_times_pre_image = value['ms_times_pre_image'] if isinstance(fps, list): is_multiple_results = True mean_fps = value['mean_fps'] mean_times_pre_image = value['mean_times_pre_image'] fps_str = ','.join([str(s) for s in fps]) ms_times_pre_image_str = ','.join( [str(s) for s in ms_times_pre_image]) table_data.append([ name, fps_str, mean_fps, ms_times_pre_image_str, mean_times_pre_image ]) else: table_data.append([name, fps, ms_times_pre_image]) if is_multiple_results: table_data.insert(0, [ 'model', 'fps', 'mean_fps', 'times_pre_image(ms)', 'mean_times_pre_image(ms)' ]) else: table_data.insert(0, ['model', 'fps', 'times_pre_image(ms)']) table = GithubFlavoredMarkdownTable(table_data) print(table.table, flush=True) if __name__ == '__main__': args = parse_args() assert args.round_num >= 0 assert args.repeat_num >= 1 config = Config.fromfile(args.config) if args.launcher == 'none': raise NotImplementedError('Only supports distributed mode') else: init_dist(args.launcher) result_dict = {} for model_key in config: model_infos = config[model_key] if not isinstance(model_infos, list): model_infos = [model_infos] for model_info in model_infos: record_metrics = model_info['metric'] cfg_path = model_info['config'].strip() cfg = Config.fromfile(cfg_path) checkpoint = osp.join(args.checkpoint_root, model_info['checkpoint'].strip()) try: fps = repeat_measure_inference_speed(cfg, checkpoint, args.max_iter, args.log_interval, args.fuse_conv_bn, args.repeat_num) if args.repeat_num > 1: fps_list = [round(fps_, args.round_num) for fps_ in fps] times_pre_image_list = [ round(1000 / fps_, args.round_num) for fps_ in fps ] mean_fps = round( sum(fps_list) / len(fps_list), args.round_num) mean_times_pre_image = round( sum(times_pre_image_list) / len(times_pre_image_list), args.round_num) print( f'{cfg_path} ' f'Overall fps: {fps_list}[{mean_fps}] img / s, ' f'times per image: ' f'{times_pre_image_list}[{mean_times_pre_image}] ' f'ms / img', flush=True) result_dict[cfg_path] = dict( fps=fps_list, mean_fps=mean_fps, ms_times_pre_image=times_pre_image_list, mean_times_pre_image=mean_times_pre_image) else: print( f'{cfg_path} fps : {fps:.{args.round_num}f} img / s, ' f'times per image: {1000 / fps:.{args.round_num}f} ' f'ms / img', flush=True) result_dict[cfg_path] = dict( fps=round(fps, args.round_num), ms_times_pre_image=round(1000 / fps, args.round_num)) except Exception as e: print(f'{cfg_path} error: {repr(e)}') if args.repeat_num > 1: result_dict[cfg_path] = dict( fps=[0], mean_fps=0, ms_times_pre_image=[0], mean_times_pre_image=0) else: result_dict[cfg_path] = dict(fps=0, ms_times_pre_image=0) if args.out: mmcv.mkdir_or_exist(args.out) mmcv.dump(result_dict, osp.join(args.out, 'batch_inference_fps.json')) results2markdown(result_dict) ================================================ FILE: DLTA_AI_app/mmdetection/.dev_scripts/benchmark_test_image.py ================================================ # Copyright (c) OpenMMLab. 
All rights reserved. import logging import os.path as osp from argparse import ArgumentParser from mmcv import Config from mmdet.apis import inference_detector, init_detector, show_result_pyplot from mmdet.utils import get_root_logger def parse_args(): parser = ArgumentParser() parser.add_argument('config', help='test config file path') parser.add_argument('checkpoint_root', help='Checkpoint file root path') parser.add_argument('--img', default='demo/demo.jpg', help='Image file') parser.add_argument('--aug', action='store_true', help='aug test') parser.add_argument('--model-name', help='model name to inference') parser.add_argument('--show', action='store_true', help='show results') parser.add_argument( '--wait-time', type=float, default=1, help='the interval of show (s), 0 is block') parser.add_argument( '--device', default='cuda:0', help='Device used for inference') parser.add_argument( '--score-thr', type=float, default=0.3, help='bbox score threshold') args = parser.parse_args() return args def inference_model(config_name, checkpoint, args, logger=None): cfg = Config.fromfile(config_name) if args.aug: if 'flip' in cfg.data.test.pipeline[1]: cfg.data.test.pipeline[1].flip = True else: if logger is not None: logger.error(f'{config_name}: unable to start aug test') else: print(f'{config_name}: unable to start aug test', flush=True) model = init_detector(cfg, checkpoint, device=args.device) # test a single image result = inference_detector(model, args.img) # show the results if args.show: show_result_pyplot( model, args.img, result, score_thr=args.score_thr, wait_time=args.wait_time) return result # Sample test whether the inference code is correct def main(args): config = Config.fromfile(args.config) # test single model if args.model_name: if args.model_name in config: model_infos = config[args.model_name] if not isinstance(model_infos, list): model_infos = [model_infos] model_info = model_infos[0] config_name = model_info['config'].strip() print(f'processing: {config_name}', flush=True) checkpoint = osp.join(args.checkpoint_root, model_info['checkpoint'].strip()) # build the model from a config file and a checkpoint file inference_model(config_name, checkpoint, args) return else: raise RuntimeError('model name input error.') # test all model logger = get_root_logger( log_file='benchmark_test_image.log', log_level=logging.ERROR) for model_key in config: model_infos = config[model_key] if not isinstance(model_infos, list): model_infos = [model_infos] for model_info in model_infos: print('processing: ', model_info['config'], flush=True) config_name = model_info['config'].strip() checkpoint = osp.join(args.checkpoint_root, model_info['checkpoint'].strip()) try: # build the model from a config file and a checkpoint file inference_model(config_name, checkpoint, args, logger) except Exception as e: logger.error(f'{config_name} " : {repr(e)}') if __name__ == '__main__': args = parse_args() main(args) ================================================ FILE: DLTA_AI_app/mmdetection/.dev_scripts/check_links.py ================================================ # Modified from: # https://github.com/allenai/allennlp/blob/main/scripts/check_links.py import argparse import logging import os import pathlib import re import sys from multiprocessing.dummy import Pool from typing import NamedTuple, Optional, Tuple import requests from mmcv.utils import get_logger def parse_args(): parser = argparse.ArgumentParser( description='Goes through all the inline-links ' 'in markdown files and reports the breakages') 
parser.add_argument( '--num-threads', type=int, default=100, help='Number of processes to confirm the link') parser.add_argument('--https-proxy', type=str, help='https proxy') parser.add_argument( '--out', type=str, default='link_reports.txt', help='output path of reports') args = parser.parse_args() return args OK_STATUS_CODES = ( 200, 401, # the resource exists but may require some sort of login. 403, # ^ same 405, # HEAD method not allowed. # the resource exists, but our default 'Accept-' header may not # match what the server can provide. 406, ) class MatchTuple(NamedTuple): source: str name: str link: str def check_link( match_tuple: MatchTuple, http_session: requests.Session, logger: logging = None) -> Tuple[MatchTuple, bool, Optional[str]]: reason: Optional[str] = None if match_tuple.link.startswith('http'): result_ok, reason = check_url(match_tuple, http_session) else: result_ok = check_path(match_tuple) if logger is None: print(f" {'✓' if result_ok else '✗'} {match_tuple.link}") else: logger.info(f" {'✓' if result_ok else '✗'} {match_tuple.link}") return match_tuple, result_ok, reason def check_url(match_tuple: MatchTuple, http_session: requests.Session) -> Tuple[bool, str]: """Check if a URL is reachable.""" try: result = http_session.head( match_tuple.link, timeout=5, allow_redirects=True) return ( result.ok or result.status_code in OK_STATUS_CODES, f'status code = {result.status_code}', ) except (requests.ConnectionError, requests.Timeout): return False, 'connection error' def check_path(match_tuple: MatchTuple) -> bool: """Check if a file in this repository exists.""" relative_path = match_tuple.link.split('#')[0] full_path = os.path.join( os.path.dirname(str(match_tuple.source)), relative_path) return os.path.exists(full_path) def main(): args = parse_args() # setup logger logger = get_logger(name='mmdet', log_file=args.out) # setup https_proxy if args.https_proxy: os.environ['https_proxy'] = args.https_proxy # setup http_session http_session = requests.Session() for resource_prefix in ('http://', 'https://'): http_session.mount( resource_prefix, requests.adapters.HTTPAdapter( max_retries=5, pool_connections=20, pool_maxsize=args.num_threads), ) logger.info('Finding all markdown files in the current directory...') project_root = (pathlib.Path(__file__).parent / '..').resolve() markdown_files = project_root.glob('**/*.md') all_matches = set() url_regex = re.compile(r'\[([^!][^\]]+)\]\(([^)(]+)\)') for markdown_file in markdown_files: with open(markdown_file) as handle: for line in handle.readlines(): matches = url_regex.findall(line) for name, link in matches: if 'localhost' not in link: all_matches.add( MatchTuple( source=str(markdown_file), name=name, link=link)) logger.info(f' {len(all_matches)} markdown files found') logger.info('Checking to make sure we can retrieve each link...') with Pool(processes=args.num_threads) as pool: results = pool.starmap(check_link, [(match, http_session, logger) for match in list(all_matches)]) # collect unreachable results unreachable_results = [(match_tuple, reason) for match_tuple, success, reason in results if not success] if unreachable_results: logger.info('================================================') logger.info(f'Unreachable links ({len(unreachable_results)}):') for match_tuple, reason in unreachable_results: logger.info(' > Source: ' + match_tuple.source) logger.info(' Name: ' + match_tuple.name) logger.info(' Link: ' + match_tuple.link) if reason is not None: logger.info(' Reason: ' + reason) sys.exit(1) logger.info('No 
Unreachable link found.') if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/.dev_scripts/convert_test_benchmark_script.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import argparse import os import os.path as osp from mmcv import Config def parse_args(): parser = argparse.ArgumentParser( description='Convert benchmark model list to script') parser.add_argument('config', help='test config file path') parser.add_argument('--port', type=int, default=29666, help='dist port') parser.add_argument( '--work-dir', default='tools/batch_test', help='the dir to save metric') parser.add_argument( '--run', action='store_true', help='run script directly') parser.add_argument( '--out', type=str, help='path to save model benchmark script') args = parser.parse_args() return args def process_model_info(model_info, work_dir): config = model_info['config'].strip() fname, _ = osp.splitext(osp.basename(config)) job_name = fname work_dir = osp.join(work_dir, fname) checkpoint = model_info['checkpoint'].strip() if not isinstance(model_info['eval'], list): evals = [model_info['eval']] else: evals = model_info['eval'] eval = ' '.join(evals) return dict( config=config, job_name=job_name, work_dir=work_dir, checkpoint=checkpoint, eval=eval) def create_test_bash_info(commands, model_test_dict, port, script_name, partition): config = model_test_dict['config'] job_name = model_test_dict['job_name'] checkpoint = model_test_dict['checkpoint'] work_dir = model_test_dict['work_dir'] eval = model_test_dict['eval'] echo_info = f' \necho \'{config}\' &' commands.append(echo_info) commands.append('\n') command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \ f'CPUS_PER_TASK=2 {script_name} ' command_info += f'{partition} ' command_info += f'{job_name} ' command_info += f'{config} ' command_info += f'$CHECKPOINT_DIR/{checkpoint} ' command_info += f'--work-dir {work_dir} ' command_info += f'--eval {eval} ' command_info += f'--cfg-option dist_params.port={port} ' command_info += ' &' commands.append(command_info) def main(): args = parse_args() if args.out: out_suffix = args.out.split('.')[-1] assert args.out.endswith('.sh'), \ f'Expected out file path suffix is .sh, but get .{out_suffix}' assert args.out or args.run, \ ('Please specify at least one operation (save/run/ the ' 'script) with the argument "--out" or "--run"') commands = [] partition_name = 'PARTITION=$1 ' commands.append(partition_name) commands.append('\n') checkpoint_root = 'CHECKPOINT_DIR=$2 ' commands.append(checkpoint_root) commands.append('\n') script_name = osp.join('tools', 'slurm_test.sh') port = args.port work_dir = args.work_dir cfg = Config.fromfile(args.config) for model_key in cfg: model_infos = cfg[model_key] if not isinstance(model_infos, list): model_infos = [model_infos] for model_info in model_infos: print('processing: ', model_info['config']) model_test_dict = process_model_info(model_info, work_dir) create_test_bash_info(commands, model_test_dict, port, script_name, '$PARTITION') port += 1 command_str = ''.join(commands) if args.out: with open(args.out, 'w') as f: f.write(command_str) if args.run: os.system(command_str) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/.dev_scripts/convert_train_benchmark_script.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
import argparse import os import os.path as osp def parse_args(): parser = argparse.ArgumentParser( description='Convert benchmark model json to script') parser.add_argument( 'txt_path', type=str, help='txt path output by benchmark_filter') parser.add_argument( '--partition', type=str, default='openmmlab', help='slurm partition name') parser.add_argument( '--max-keep-ckpts', type=int, default=1, help='The maximum checkpoints to keep') parser.add_argument( '--run', action='store_true', help='run script directly') parser.add_argument( '--out', type=str, help='path to save model benchmark script') args = parser.parse_args() return args def main(): args = parse_args() if args.out: out_suffix = args.out.split('.')[-1] assert args.out.endswith('.sh'), \ f'Expected out file path suffix is .sh, but get .{out_suffix}' assert args.out or args.run, \ ('Please specify at least one operation (save/run/ the ' 'script) with the argument "--out" or "--run"') partition = args.partition # cluster name root_name = './tools' train_script_name = osp.join(root_name, 'slurm_train.sh') # stdout is no output stdout_cfg = '>/dev/null' max_keep_ckpts = args.max_keep_ckpts commands = [] with open(args.txt_path, 'r') as f: model_cfgs = f.readlines() for i, cfg in enumerate(model_cfgs): cfg = cfg.strip() if len(cfg) == 0: continue # print cfg name echo_info = f'echo \'{cfg}\' &' commands.append(echo_info) commands.append('\n') fname, _ = osp.splitext(osp.basename(cfg)) out_fname = osp.join(root_name, 'work_dir', fname) # default setting if cfg.find('16x') >= 0: command_info = f'GPUS=16 GPUS_PER_NODE=8 ' \ f'CPUS_PER_TASK=2 {train_script_name} ' elif cfg.find('gn-head_4x4_1x_coco.py') >= 0 or \ cfg.find('gn-head_4x4_2x_coco.py') >= 0: command_info = f'GPUS=4 GPUS_PER_NODE=4 ' \ f'CPUS_PER_TASK=2 {train_script_name} ' else: command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \ f'CPUS_PER_TASK=2 {train_script_name} ' command_info += f'{partition} ' command_info += f'{fname} ' command_info += f'{cfg} ' command_info += f'{out_fname} ' if max_keep_ckpts: command_info += f'--cfg-options ' \ f'checkpoint_config.max_keep_ckpts=' \ f'{max_keep_ckpts}' + ' ' command_info += f'{stdout_cfg} &' commands.append(command_info) if i < len(model_cfgs): commands.append('\n') command_str = ''.join(commands) if args.out: with open(args.out, 'w') as f: f.write(command_str) if args.run: os.system(command_str) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/.dev_scripts/gather_models.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
import argparse import glob import json import os.path as osp import shutil import subprocess from collections import OrderedDict import mmcv import torch import yaml def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds): class OrderedDumper(Dumper): pass def _dict_representer(dumper, data): return dumper.represent_mapping( yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items()) OrderedDumper.add_representer(OrderedDict, _dict_representer) return yaml.dump(data, stream, OrderedDumper, **kwds) def process_checkpoint(in_file, out_file): checkpoint = torch.load(in_file, map_location='cpu') # remove optimizer for smaller file size if 'optimizer' in checkpoint: del checkpoint['optimizer'] # remove ema state_dict for key in list(checkpoint['state_dict']): if key.startswith('ema_'): checkpoint['state_dict'].pop(key) # if it is necessary to remove some sensitive data in checkpoint['meta'], # add the code here. if torch.__version__ >= '1.6': torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False) else: torch.save(checkpoint, out_file) sha = subprocess.check_output(['sha256sum', out_file]).decode() final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8]) subprocess.Popen(['mv', out_file, final_file]) return final_file def is_by_epoch(config): cfg = mmcv.Config.fromfile('./configs/' + config) return cfg.runner.type == 'EpochBasedRunner' def get_final_epoch_or_iter(config): cfg = mmcv.Config.fromfile('./configs/' + config) if cfg.runner.type == 'EpochBasedRunner': return cfg.runner.max_epochs else: return cfg.runner.max_iters def get_best_epoch_or_iter(exp_dir): best_epoch_iter_full_path = list( sorted(glob.glob(osp.join(exp_dir, 'best_*.pth'))))[-1] best_epoch_or_iter_model_path = best_epoch_iter_full_path.split('/')[-1] best_epoch_or_iter = best_epoch_or_iter_model_path.\ split('_')[-1].split('.')[0] return best_epoch_or_iter_model_path, int(best_epoch_or_iter) def get_real_epoch_or_iter(config): cfg = mmcv.Config.fromfile('./configs/' + config) if cfg.runner.type == 'EpochBasedRunner': epoch = cfg.runner.max_epochs if cfg.data.train.type == 'RepeatDataset': epoch *= cfg.data.train.times return epoch else: return cfg.runner.max_iters def get_final_results(log_json_path, epoch_or_iter, results_lut, by_epoch=True): result_dict = dict() last_val_line = None last_train_line = None last_val_line_idx = -1 last_train_line_idx = -1 with open(log_json_path, 'r') as f: for i, line in enumerate(f.readlines()): log_line = json.loads(line) if 'mode' not in log_line.keys(): continue if by_epoch: if (log_line['mode'] == 'train' and log_line['epoch'] == epoch_or_iter): result_dict['memory'] = log_line['memory'] if (log_line['mode'] == 'val' and log_line['epoch'] == epoch_or_iter): result_dict.update({ key: log_line[key] for key in results_lut if key in log_line }) return result_dict else: if log_line['mode'] == 'train': last_train_line_idx = i last_train_line = log_line if log_line and log_line['mode'] == 'val': last_val_line_idx = i last_val_line = log_line # bug: max_iters = 768, last_train_line['iter'] = 750 assert last_val_line_idx == last_train_line_idx + 1, \ 'Log file is incomplete' result_dict['memory'] = last_train_line['memory'] result_dict.update({ key: last_val_line[key] for key in results_lut if key in last_val_line }) return result_dict def get_dataset_name(config): # If there are more dataset, add here. 
name_map = dict( CityscapesDataset='Cityscapes', CocoDataset='COCO', CocoPanopticDataset='COCO', DeepFashionDataset='Deep Fashion', LVISV05Dataset='LVIS v0.5', LVISV1Dataset='LVIS v1', VOCDataset='Pascal VOC', WIDERFaceDataset='WIDER Face', OpenImagesDataset='OpenImagesDataset', OpenImagesChallengeDataset='OpenImagesChallengeDataset') cfg = mmcv.Config.fromfile('./configs/' + config) return name_map[cfg.dataset_type] def convert_model_info_to_pwc(model_infos): pwc_files = {} for model in model_infos: cfg_folder_name = osp.split(model['config'])[-2] pwc_model_info = OrderedDict() pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0] pwc_model_info['In Collection'] = 'Please fill in Collection name' pwc_model_info['Config'] = osp.join('configs', model['config']) # get metadata memory = round(model['results']['memory'] / 1024, 1) meta_data = OrderedDict() meta_data['Training Memory (GB)'] = memory if 'epochs' in model: meta_data['Epochs'] = get_real_epoch_or_iter(model['config']) else: meta_data['Iterations'] = get_real_epoch_or_iter(model['config']) pwc_model_info['Metadata'] = meta_data # get dataset name dataset_name = get_dataset_name(model['config']) # get results results = [] # if there are more metrics, add here. if 'bbox_mAP' in model['results']: metric = round(model['results']['bbox_mAP'] * 100, 1) results.append( OrderedDict( Task='Object Detection', Dataset=dataset_name, Metrics={'box AP': metric})) if 'segm_mAP' in model['results']: metric = round(model['results']['segm_mAP'] * 100, 1) results.append( OrderedDict( Task='Instance Segmentation', Dataset=dataset_name, Metrics={'mask AP': metric})) if 'PQ' in model['results']: metric = round(model['results']['PQ'], 1) results.append( OrderedDict( Task='Panoptic Segmentation', Dataset=dataset_name, Metrics={'PQ': metric})) pwc_model_info['Results'] = results link_string = 'https://download.openmmlab.com/mmdetection/v2.0/' link_string += '{}/{}'.format(model['config'].rstrip('.py'), osp.split(model['model_path'])[-1]) pwc_model_info['Weights'] = link_string if cfg_folder_name in pwc_files: pwc_files[cfg_folder_name].append(pwc_model_info) else: pwc_files[cfg_folder_name] = [pwc_model_info] return pwc_files def parse_args(): parser = argparse.ArgumentParser(description='Gather benchmarked models') parser.add_argument( 'root', type=str, help='root path of benchmarked models to be gathered') parser.add_argument( 'out', type=str, help='output path of gathered models to be stored') parser.add_argument( '--best', action='store_true', help='whether to gather the best model.') args = parser.parse_args() return args def main(): args = parse_args() models_root = args.root models_out = args.out mmcv.mkdir_or_exist(models_out) # find all models in the root directory to be gathered raw_configs = list(mmcv.scandir('./configs', '.py', recursive=True)) # filter configs that is not trained in the experiments dir used_configs = [] for raw_config in raw_configs: if osp.exists(osp.join(models_root, raw_config)): used_configs.append(raw_config) print(f'Find {len(used_configs)} models to be gathered') # find final_ckpt and log file for trained each config # and parse the best performance model_infos = [] for used_config in used_configs: exp_dir = osp.join(models_root, used_config) by_epoch = is_by_epoch(used_config) # check whether the exps is finished if args.best is True: final_model, final_epoch_or_iter = get_best_epoch_or_iter(exp_dir) else: final_epoch_or_iter = get_final_epoch_or_iter(used_config) final_model = 
            final_model = '{}_{}.pth'.format('epoch' if by_epoch else 'iter',
                                             final_epoch_or_iter)

        model_path = osp.join(exp_dir, final_model)
        # skip if the model is still training
        if not osp.exists(model_path):
            continue

        # get the latest logs
        log_json_path = list(
            sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1]
        log_txt_path = list(sorted(glob.glob(osp.join(exp_dir, '*.log'))))[-1]
        cfg = mmcv.Config.fromfile('./configs/' + used_config)
        results_lut = cfg.evaluation.metric
        if not isinstance(results_lut, list):
            results_lut = [results_lut]
        # when using VOC, the evaluation key is only 'mAP';
        # when using a panoptic dataset, the evaluation key is 'PQ'.
        for i, key in enumerate(results_lut):
            if 'mAP' not in key and 'PQ' not in key:
                results_lut[i] = key + '_mAP'
        model_performance = get_final_results(log_json_path,
                                              final_epoch_or_iter,
                                              results_lut, by_epoch)

        if model_performance is None:
            continue

        model_time = osp.split(log_txt_path)[-1].split('.')[0]
        model_info = dict(
            config=used_config,
            results=model_performance,
            model_time=model_time,
            final_model=final_model,
            log_json_path=osp.split(log_json_path)[-1])
        model_info['epochs' if by_epoch else 'iterations'] = \
            final_epoch_or_iter
        model_infos.append(model_info)

    # publish the model for each checkpoint
    publish_model_infos = []
    for model in model_infos:
        model_publish_dir = osp.join(models_out,
                                     model['config'].rstrip('.py'))
        mmcv.mkdir_or_exist(model_publish_dir)

        model_name = osp.split(model['config'])[-1].split('.')[0]
        model_name += '_' + model['model_time']
        publish_model_path = osp.join(model_publish_dir, model_name)
        trained_model_path = osp.join(models_root, model['config'],
                                      model['final_model'])

        # convert model
        final_model_path = process_checkpoint(trained_model_path,
                                              publish_model_path)

        # copy log
        shutil.copy(
            osp.join(models_root, model['config'], model['log_json_path']),
            osp.join(model_publish_dir, f'{model_name}.log.json'))
        shutil.copy(
            osp.join(models_root, model['config'],
                     model['log_json_path'].rstrip('.json')),
            osp.join(model_publish_dir, f'{model_name}.log'))

        # copy config to guarantee reproducibility
        config_path = model['config']
        config_path = osp.join(
            'configs',
            config_path) if 'configs' not in config_path else config_path
        target_config_path = osp.split(config_path)[-1]
        shutil.copy(config_path,
                    osp.join(model_publish_dir, target_config_path))

        model['model_path'] = final_model_path
        publish_model_infos.append(model)

    models = dict(models=publish_model_infos)
    print(f'Gathered {len(publish_model_infos)} models in total')
    mmcv.dump(models, osp.join(models_out, 'model_info.json'))

    pwc_files = convert_model_info_to_pwc(publish_model_infos)
    for name in pwc_files:
        with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f:
            ordered_yaml_dump(pwc_files[name], f, encoding='utf-8')


if __name__ == '__main__':
    main()
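A minimal usage sketch, assuming the script is run from the mmdetection root (it resolves configs against the relative './configs' path); the work_dirs and published_models paths below are hypothetical placeholders:

    # gather the best checkpoint of every trained config under work_dirs/ and
    # publish it (hashed .pth, logs, config copy, metafile YAML) into published_models/
    python .dev_scripts/gather_models.py work_dirs published_models --best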
================================================
FILE: DLTA_AI_app/mmdetection/.dev_scripts/gather_test_benchmark_metric.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp

import mmcv
from mmcv import Config


def parse_args():
    parser = argparse.ArgumentParser(
        description='Gather benchmarked models metric')
    parser.add_argument('config', help='test config file path')
    parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        '--out',
        type=str,
        help='output path of gathered metrics to be stored')
    parser.add_argument(
        '--not-show', action='store_true', help='not show metrics')
    parser.add_argument(
        '--show-all', action='store_true', help='show all model metrics')

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()

    root_path = args.root
    metrics_out = args.out
    result_dict = {}

    cfg = Config.fromfile(args.config)

    for model_key in cfg:
        model_infos = cfg[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            record_metrics = model_info['metric']
            config = model_info['config'].strip()
            fname, _ = osp.splitext(osp.basename(config))
            metric_json_dir = osp.join(root_path, fname)
            if osp.exists(metric_json_dir):
                json_list = glob.glob(osp.join(metric_json_dir, '*.json'))
                if len(json_list) > 0:
                    log_json_path = list(sorted(json_list))[-1]

                    metric = mmcv.load(log_json_path)
                    if config in metric.get('config', {}):
                        new_metrics = dict()
                        for record_metric_key in record_metrics:
                            record_metric_key_bk = record_metric_key
                            old_metric = record_metrics[record_metric_key]
                            if record_metric_key == 'AR_1000':
                                record_metric_key = 'AR@1000'
                            if record_metric_key not in metric['metric']:
                                raise KeyError(
                                    'record_metric_key does not exist, '
                                    'please check your config')
                            new_metric = round(
                                metric['metric'][record_metric_key] * 100, 1)
                            new_metrics[record_metric_key_bk] = new_metric

                        if args.show_all:
                            result_dict[config] = dict(
                                before=record_metrics, after=new_metrics)
                        else:
                            for record_metric_key in record_metrics:
                                old_metric = record_metrics[record_metric_key]
                                new_metric = new_metrics[record_metric_key]
                                if old_metric != new_metric:
                                    result_dict[config] = dict(
                                        before=record_metrics,
                                        after=new_metrics)
                                    break
                    else:
                        print(f'{config} not included in: {log_json_path}')
                else:
                    print(f'{config}: no json file found in {metric_json_dir}')
            else:
                print(f'{config}: result dir does not exist: {metric_json_dir}')

    if metrics_out:
        mmcv.mkdir_or_exist(metrics_out)
        mmcv.dump(result_dict,
                  osp.join(metrics_out, 'batch_test_metric_info.json'))
    if not args.not_show:
        print('===================================')
        for config_name, metrics in result_dict.items():
            print(config_name, metrics)
        print('===================================')
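A usage sketch, assuming the positional config is a batch-test list such as .dev_scripts/batch_test_list.py (entries carrying 'config' and 'metric' fields) and that root points at the per-model result jsons, e.g. tools/batch_test as written by test_benchmark.sh; all paths are illustrative:

    python .dev_scripts/gather_test_benchmark_metric.py .dev_scripts/batch_test_list.py tools/batch_test --out batch_test_metrics --show-all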
================================================
FILE: DLTA_AI_app/mmdetection/.dev_scripts/gather_train_benchmark_metric.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp

import mmcv
from gather_models import get_final_results

try:
    import xlrd
except ImportError:
    xlrd = None
try:
    import xlutils
    from xlutils.copy import copy
except ImportError:
    xlutils = None


def parse_args():
    parser = argparse.ArgumentParser(
        description='Gather benchmarked models metric')
    parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        'txt_path', type=str, help='txt path output by benchmark_filter')
    parser.add_argument(
        '--out',
        type=str,
        help='output path of gathered metrics to be stored')
    parser.add_argument(
        '--not-show', action='store_true', help='not show metrics')
    parser.add_argument(
        '--excel', type=str, help='input path of excel to be recorded')
    parser.add_argument(
        '--ncol',
        type=int,
        help='number of the column to be modified or appended')

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()

    if args.excel:
        assert args.ncol, 'Please specify "--excel" and "--ncol" ' \
                          'at the same time'
        if xlrd is None:
            raise RuntimeError(
                'xlrd is not installed, '
                'please use "pip install xlrd==1.2.0" to install it')
        if xlutils is None:
            raise RuntimeError(
                'xlutils is not installed, '
                'please use "pip install xlutils==2.0.0" to install it')
        readbook = xlrd.open_workbook(args.excel)
        sheet = readbook.sheet_by_name('Sheet1')
        sheet_info = {}
        total_nrows = sheet.nrows
        for i in range(3, sheet.nrows):
            sheet_info[sheet.row_values(i)[0]] = i
        xlrw = copy(readbook)
        table = xlrw.get_sheet(0)

    root_path = args.root
    metrics_out = args.out
    result_dict = {}

    with open(args.txt_path, 'r') as f:
        model_cfgs = f.readlines()
        for i, config in enumerate(model_cfgs):
            config = config.strip()
            if len(config) == 0:
                continue

            config_name = osp.split(config)[-1]
            config_name = osp.splitext(config_name)[0]
            result_path = osp.join(root_path, config_name)
            if osp.exists(result_path):
                # 1. read config
                cfg = mmcv.Config.fromfile(config)
                total_epochs = cfg.runner.max_epochs
                final_results = cfg.evaluation.metric
                if not isinstance(final_results, list):
                    final_results = [final_results]
                final_results_out = []
                for key in final_results:
                    if 'proposal_fast' in key:
                        final_results_out.append('AR@1000')  # RPN
                    elif 'mAP' not in key:
                        final_results_out.append(key + '_mAP')

                # 2. determine whether the total_epochs ckpt exists
                ckpt_path = f'epoch_{total_epochs}.pth'
                if osp.exists(osp.join(result_path, ckpt_path)):
                    log_json_path = list(
                        sorted(glob.glob(osp.join(result_path,
                                                  '*.log.json'))))[-1]

                    # 3. read metric
                    model_performance = get_final_results(
                        log_json_path, total_epochs, final_results_out)
                    if model_performance is None:
                        print(f'log file error: {log_json_path}')
                        continue
                    for performance in model_performance:
                        if performance in ['AR@1000', 'bbox_mAP', 'segm_mAP']:
                            metric = round(
                                model_performance[performance] * 100, 1)
                            model_performance[performance] = metric
                    result_dict[config] = model_performance

                    # update and append excel content
                    if args.excel:
                        if 'AR@1000' in model_performance:
                            metrics = f'{model_performance["AR@1000"]}' \
                                      f'(AR@1000)'
                        elif 'segm_mAP' in model_performance:
                            metrics = f'{model_performance["bbox_mAP"]}/' \
                                      f'{model_performance["segm_mAP"]}'
                        else:
                            metrics = f'{model_performance["bbox_mAP"]}'

                        row_num = sheet_info.get(config, None)
                        if row_num:
                            table.write(row_num, args.ncol, metrics)
                        else:
                            table.write(total_nrows, 0, config)
                            table.write(total_nrows, args.ncol, metrics)
                            total_nrows += 1
                else:
                    print(f'{config}: {ckpt_path} does not exist')
            else:
                print(f'does not exist: {config}')

        # 4. save or print results
        if metrics_out:
            mmcv.mkdir_or_exist(metrics_out)
            mmcv.dump(result_dict,
                      osp.join(metrics_out, 'model_metric_info.json'))
        if not args.not_show:
            print('===================================')
            for config_name, metrics in result_dict.items():
                print(config_name, metrics)
            print('===================================')
        if args.excel:
            filename, suffix = osp.splitext(args.excel)
            xlrw.save(f'{filename}_o{suffix}')
            print(f'>>> Output {filename}_o{suffix}')
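A usage sketch, assuming work_dirs holds the training results and the txt file is the config list produced by the benchmark filter (batch_train_list.txt in this directory); the Excel workbook is a placeholder and must contain a 'Sheet1' whose first column lists config paths, since the script reads existing rows starting at index 3:

    python .dev_scripts/gather_train_benchmark_metric.py work_dirs .dev_scripts/batch_train_list.txt --out train_metrics
    # optionally record the metrics into column 5 of an existing workbook
    python .dev_scripts/gather_train_benchmark_metric.py work_dirs .dev_scripts/batch_train_list.txt --excel benchmark.xls --ncol 5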
================================================
FILE: DLTA_AI_app/mmdetection/.dev_scripts/linter.sh
================================================
yapf -r -i mmdet/ configs/ tests/ tools/
isort -rc mmdet/ configs/ tests/ tools/
flake8 .

================================================
FILE: DLTA_AI_app/mmdetection/.dev_scripts/test_benchmark.sh
================================================
PARTITION=$1
CHECKPOINT_DIR=$2

echo 'configs/atss/atss_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION atss_r50_fpn_1x_coco configs/atss/atss_r50_fpn_1x_coco.py $CHECKPOINT_DIR/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth --work-dir tools/batch_test/atss_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29666 &
echo 'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION autoassign_r50_fpn_8x2_1x_coco configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py $CHECKPOINT_DIR/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth --work-dir tools/batch_test/autoassign_r50_fpn_8x2_1x_coco --eval bbox --cfg-option dist_params.port=29667 &
echo 'configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_carafe_1x_coco configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_carafe_1x_coco --eval bbox --cfg-option dist_params.port=29668 &
echo 'configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION cascade_rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth --work-dir tools/batch_test/cascade_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29669 &
echo 'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION cascade_mask_rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth --work-dir tools/batch_test/cascade_mask_rcnn_r50_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29670 &
echo 'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION crpn_faster_rcnn_r50_caffe_fpn_1x_coco configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py $CHECKPOINT_DIR/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth --work-dir tools/batch_test/crpn_faster_rcnn_r50_caffe_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29671 &
echo 'configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION centripetalnet_hourglass104_mstest_16x6_210e_coco configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py $CHECKPOINT_DIR/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth --work-dir tools/batch_test/centripetalnet_hourglass104_mstest_16x6_210e_coco --eval bbox --cfg-option dist_params.port=29672 &
echo 'configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION cornernet_hourglass104_mstest_8x6_210e_coco configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py $CHECKPOINT_DIR/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth --work-dir tools/batch_test/cornernet_hourglass104_mstest_8x6_210e_coco --eval bbox --cfg-option dist_params.port=29673 &
echo 'configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco --eval bbox --cfg-option dist_params.port=29674 &
echo 'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deformable_detr_r50_16x2_50e_coco configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py $CHECKPOINT_DIR/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth --work-dir tools/batch_test/deformable_detr_r50_16x2_50e_coco --eval bbox --cfg-option dist_params.port=29675 &
echo 'configs/detectors/detectors_htc_r50_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION detectors_htc_r50_1x_coco configs/detectors/detectors_htc_r50_1x_coco.py $CHECKPOINT_DIR/detectors_htc_r50_1x_coco-329b1453.pth --work-dir tools/batch_test/detectors_htc_r50_1x_coco --eval bbox segm --cfg-option dist_params.port=29676 &
echo 'configs/detr/detr_r50_8x2_150e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION detr_r50_8x2_150e_coco configs/detr/detr_r50_8x2_150e_coco.py $CHECKPOINT_DIR/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth --work-dir tools/batch_test/detr_r50_8x2_150e_coco --eval bbox --cfg-option dist_params.port=29677 &
echo 'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION dh_faster_rcnn_r50_fpn_1x_coco configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth --work-dir tools/batch_test/dh_faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29678 &
echo 'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION dynamic_rcnn_r50_fpn_1x_coco configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/dynamic_rcnn_r50_fpn_1x-62a3f276.pth --work-dir tools/batch_test/dynamic_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29679 &
echo 'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_attention_1111_1x_coco configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_attention_1111_1x_coco --eval bbox --cfg-option dist_params.port=29680 &
echo 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_1x_coco configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29681 &
echo 'configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py $CHECKPOINT_DIR/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth --work-dir tools/batch_test/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco --eval bbox --cfg-option dist_params.port=29682 &
echo 'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fovea_align_r50_fpn_gn-head_4x4_2x_coco configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py $CHECKPOINT_DIR/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth --work-dir tools/batch_test/fovea_align_r50_fpn_gn-head_4x4_2x_coco --eval bbox --cfg-option dist_params.port=29683 &
echo 'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION retinanet_free_anchor_r50_fpn_1x_coco configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py $CHECKPOINT_DIR/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth --work-dir tools/batch_test/retinanet_free_anchor_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29684 &
echo 'configs/fsaf/fsaf_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fsaf_r50_fpn_1x_coco configs/fsaf/fsaf_r50_fpn_1x_coco.py $CHECKPOINT_DIR/fsaf_r50_fpn_1x_coco-94ccc51f.pth --work-dir tools/batch_test/fsaf_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29685 &
echo 'configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth --work-dir tools/batch_test/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco --eval bbox segm --cfg-option dist_params.port=29686 &
echo 'configs/gfl/gfl_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION gfl_r50_fpn_1x_coco configs/gfl/gfl_r50_fpn_1x_coco.py $CHECKPOINT_DIR/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth --work-dir tools/batch_test/gfl_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29687 &
echo 'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION mask_rcnn_r50_fpn_gn-all_2x_coco configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth --work-dir tools/batch_test/mask_rcnn_r50_fpn_gn-all_2x_coco --eval bbox segm --cfg-option dist_params.port=29688 &
echo 'configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_gn_ws-all_1x_coco configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_gn_ws-all_1x_coco --eval bbox --cfg-option dist_params.port=29689 &
echo 'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION grid_rcnn_r50_fpn_gn-head_2x_coco configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py $CHECKPOINT_DIR/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth --work-dir tools/batch_test/grid_rcnn_r50_fpn_gn-head_2x_coco --eval bbox --cfg-option dist_params.port=29690 &
echo 'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_groie_1x_coco configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_groie_1x_coco --eval bbox --cfg-option dist_params.port=29691 &
echo 'configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ga_retinanet_r50_caffe_fpn_1x_coco configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py $CHECKPOINT_DIR/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth --work-dir tools/batch_test/ga_retinanet_r50_caffe_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29692 &
echo 'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ga_faster_r50_caffe_fpn_1x_coco configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py $CHECKPOINT_DIR/ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth --work-dir tools/batch_test/ga_faster_r50_caffe_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29693 &
echo 'configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_hrnetv2p_w18_1x_coco configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth --work-dir tools/batch_test/faster_rcnn_hrnetv2p_w18_1x_coco --eval bbox --cfg-option dist_params.port=29694 &
echo 'configs/htc/htc_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION htc_r50_fpn_1x_coco configs/htc/htc_r50_fpn_1x_coco.py $CHECKPOINT_DIR/htc_r50_fpn_1x_coco_20200317-7332cf16.pth --work-dir tools/batch_test/htc_r50_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29695 &
echo 'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION libra_faster_rcnn_r50_fpn_1x_coco configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth --work-dir tools/batch_test/libra_faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29696 &
echo 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION mask_rcnn_r50_fpn_1x_coco configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth --work-dir tools/batch_test/mask_rcnn_r50_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29697 &
echo 'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ms_rcnn_r50_caffe_fpn_1x_coco configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py $CHECKPOINT_DIR/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth --work-dir tools/batch_test/ms_rcnn_r50_caffe_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29698 &
echo 'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py $CHECKPOINT_DIR/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth --work-dir tools/batch_test/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco --eval bbox --cfg-option dist_params.port=29699 &
echo 'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION retinanet_r50_nasfpn_crop640_50e_coco configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py $CHECKPOINT_DIR/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth --work-dir tools/batch_test/retinanet_r50_nasfpn_crop640_50e_coco --eval bbox --cfg-option dist_params.port=29700 &
echo 'configs/paa/paa_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION paa_r50_fpn_1x_coco configs/paa/paa_r50_fpn_1x_coco.py $CHECKPOINT_DIR/paa_r50_fpn_1x_coco_20200821-936edec3.pth --work-dir tools/batch_test/paa_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29701 &
echo 'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_pafpn_1x_coco configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth --work-dir tools/batch_test/faster_rcnn_r50_pafpn_1x_coco --eval bbox --cfg-option dist_params.port=29702 &
echo 'configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pisa_faster_rcnn_r50_fpn_1x_coco configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth --work-dir tools/batch_test/pisa_faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29703 &
echo 'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION point_rend_r50_caffe_fpn_mstrain_1x_coco configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py $CHECKPOINT_DIR/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth --work-dir tools/batch_test/point_rend_r50_caffe_fpn_mstrain_1x_coco --eval bbox segm --cfg-option dist_params.port=29704 &
echo 'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION mask_rcnn_regnetx-3.2GF_fpn_1x_coco configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth --work-dir tools/batch_test/mask_rcnn_regnetx-3.2GF_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29705 &
echo 'configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION reppoints_moment_r50_fpn_1x_coco configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py $CHECKPOINT_DIR/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth --work-dir tools/batch_test/reppoints_moment_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29706 &
echo 'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r2_101_fpn_2x_coco configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py $CHECKPOINT_DIR/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth --work-dir tools/batch_test/faster_rcnn_r2_101_fpn_2x_coco --eval bbox --cfg-option dist_params.port=29707 &
echo 'configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth --work-dir tools/batch_test/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco --eval bbox --cfg-option dist_params.port=29708 &
echo 'configs/retinanet/retinanet_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION retinanet_r50_fpn_1x_coco configs/retinanet/retinanet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth --work-dir tools/batch_test/retinanet_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29709 &
echo 'configs/rpn/rpn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION rpn_r50_fpn_1x_coco configs/rpn/rpn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth --work-dir tools/batch_test/rpn_r50_fpn_1x_coco --eval proposal_fast --cfg-option dist_params.port=29710 &
echo 'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION sabl_retinanet_r50_fpn_1x_coco configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth --work-dir tools/batch_test/sabl_retinanet_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29711 &
echo 'configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION sabl_faster_rcnn_r50_fpn_1x_coco configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth --work-dir tools/batch_test/sabl_faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29712 &
echo 'configs/scnet/scnet_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION scnet_r50_fpn_1x_coco configs/scnet/scnet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/scnet_r50_fpn_1x_coco-c3f09857.pth --work-dir tools/batch_test/scnet_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29713 &
echo 'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION sparse_rcnn_r50_fpn_1x_coco configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth --work-dir tools/batch_test/sparse_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29714 &
echo 'configs/ssd/ssd300_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ssd300_coco configs/ssd/ssd300_coco.py $CHECKPOINT_DIR/ssd300_coco_20210803_015428-d231a06e.pth --work-dir tools/batch_test/ssd300_coco --eval bbox --cfg-option dist_params.port=29715 &
echo 'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION tridentnet_r50_caffe_1x_coco configs/tridentnet/tridentnet_r50_caffe_1x_coco.py $CHECKPOINT_DIR/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth --work-dir tools/batch_test/tridentnet_r50_caffe_1x_coco --eval bbox --cfg-option dist_params.port=29716 &
echo 'configs/vfnet/vfnet_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION vfnet_r50_fpn_1x_coco configs/vfnet/vfnet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth --work-dir tools/batch_test/vfnet_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29717 &
echo 'configs/yolact/yolact_r50_1x8_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION yolact_r50_1x8_coco configs/yolact/yolact_r50_1x8_coco.py $CHECKPOINT_DIR/yolact_r50_1x8_coco_20200908-f38d58df.pth --work-dir tools/batch_test/yolact_r50_1x8_coco --eval bbox segm --cfg-option dist_params.port=29718 &
echo 'configs/yolo/yolov3_d53_320_273e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION yolov3_d53_320_273e_coco configs/yolo/yolov3_d53_320_273e_coco.py $CHECKPOINT_DIR/yolov3_d53_320_273e_coco-421362b6.pth --work-dir tools/batch_test/yolov3_d53_320_273e_coco --eval bbox --cfg-option dist_params.port=29719 &
echo 'configs/yolof/yolof_r50_c5_8x8_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION yolof_r50_c5_8x8_1x_coco configs/yolof/yolof_r50_c5_8x8_1x_coco.py $CHECKPOINT_DIR/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth --work-dir tools/batch_test/yolof_r50_c5_8x8_1x_coco --eval bbox --cfg-option dist_params.port=29720 &
echo 'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION centernet_resnet18_dcnv2_140e_coco configs/centernet/centernet_resnet18_dcnv2_140e_coco.py $CHECKPOINT_DIR/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth --work-dir tools/batch_test/centernet_resnet18_dcnv2_140e_coco --eval bbox --cfg-option dist_params.port=29721 &
echo 'configs/yolox/yolox_tiny_8x8_300e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION yolox_tiny_8x8_300e_coco configs/yolox/yolox_tiny_8x8_300e_coco.py $CHECKPOINT_DIR/yolox_tiny_8x8_300e_coco_20210806_234250-4ff3b67e.pth --work-dir tools/batch_test/yolox_tiny_8x8_300e_coco --eval bbox --cfg-option dist_params.port=29722 &
echo 'configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ssdlite_mobilenetv2_scratch_600e_coco configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py $CHECKPOINT_DIR/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth --work-dir tools/batch_test/ssdlite_mobilenetv2_scratch_600e_coco --eval bbox --cfg-option dist_params.port=29723 &
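Every job above is backgrounded with '&', so the whole batch is submitted to Slurm at once. A launch sketch with a placeholder partition and checkpoint directory (the .pth files must already exist there under the exact names referenced above):

    bash .dev_scripts/test_benchmark.sh my_partition /path/to/checkpoints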
"""Check out backbone whether successfully load pretrained checkpoint.""" import copy import os from os.path import dirname, exists, join import pytest from mmcv import Config, ProgressBar from mmcv.runner import _load_checkpoint from mmdet.models import build_detector def _get_config_directory(): """Find the predefined detector config directory.""" try: # Assume we are running in the source mmdetection repo repo_dpath = dirname(dirname(__file__)) except NameError: # For IPython development when this __file__ is not defined import mmdet repo_dpath = dirname(dirname(mmdet.__file__)) config_dpath = join(repo_dpath, 'configs') if not exists(config_dpath): raise Exception('Cannot find config path') return config_dpath def _get_config_module(fname): """Load a configuration as a python module.""" from mmcv import Config config_dpath = _get_config_directory() config_fpath = join(config_dpath, fname) config_mod = Config.fromfile(config_fpath) return config_mod def _get_detector_cfg(fname): """Grab configs necessary to create a detector. These are deep copied to allow for safe modification of parameters without influencing other tests. """ config = _get_config_module(fname) model = copy.deepcopy(config.model) return model def _traversed_config_file(): """We traversed all potential config files under the `config` file. If you need to print details or debug code, you can use this function. If the `backbone.init_cfg` is None (do not use `Pretrained` init way), you need add the folder name in `ignores_folder` (if the config files in this folder all set backbone.init_cfg is None) or add config name in `ignores_file` (if the config file set backbone.init_cfg is None) """ config_path = _get_config_directory() check_cfg_names = [] # `base`, `legacy_1.x` and `common` ignored by default. ignores_folder = ['_base_', 'legacy_1.x', 'common'] # 'ld' need load teacher model, if want to check 'ld', # please check teacher_config path first. ignores_folder += ['ld'] # `selfsup_pretrain` need convert model, if want to check this model, # need to convert the model first. ignores_folder += ['selfsup_pretrain'] # the `init_cfg` in 'centripetalnet', 'cornernet', 'cityscapes', # 'scratch' is None. # the `init_cfg` in ssdlite(`ssdlite_mobilenetv2_scratch_600e_coco.py`) # is None # Please confirm `bockbone.init_cfg` is None first. ignores_folder += ['centripetalnet', 'cornernet', 'cityscapes', 'scratch'] ignores_file = ['ssdlite_mobilenetv2_scratch_600e_coco.py'] for config_file_name in os.listdir(config_path): if config_file_name not in ignores_folder: config_file = join(config_path, config_file_name) if os.path.isdir(config_file): for config_sub_file in os.listdir(config_file): if config_sub_file.endswith('py') and \ config_sub_file not in ignores_file: name = join(config_file, config_sub_file) check_cfg_names.append(name) return check_cfg_names def _check_backbone(config, print_cfg=True): """Check out backbone whether successfully load pretrained model, by using `backbone.init_cfg`. First, using `mmcv._load_checkpoint` to load the checkpoint without loading models. Then, using `build_detector` to build models, and using `model.init_weights()` to initialize the parameters. Finally, assert weights and bias of each layer loaded from pretrained checkpoint are equal to the weights and bias of original checkpoint. For the convenience of comparison, we sum up weights and bias of each loaded layer separately. Args: config (str): Config file path. print_cfg (bool): Whether print logger and return the result. 
Returns: results (str or None): If backbone successfully load pretrained checkpoint, return None; else, return config file path. """ if print_cfg: print('-' * 15 + 'loading ', config) cfg = Config.fromfile(config) init_cfg = None try: init_cfg = cfg.model.backbone.init_cfg init_flag = True except AttributeError: init_flag = False if init_cfg is None or init_cfg.get('type') != 'Pretrained': init_flag = False if init_flag: checkpoint = _load_checkpoint(init_cfg.checkpoint) if 'state_dict' in checkpoint: state_dict = checkpoint['state_dict'] else: state_dict = checkpoint model = build_detector( cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg')) model.init_weights() checkpoint_layers = state_dict.keys() for name, value in model.backbone.state_dict().items(): if name in checkpoint_layers: assert value.equal(state_dict[name]) if print_cfg: print('-' * 10 + 'Successfully load checkpoint' + '-' * 10 + '\n', ) return None else: if print_cfg: print(config + '\n' + '-' * 10 + 'config file do not have init_cfg' + '-' * 10 + '\n') return config @pytest.mark.parametrize('config', _traversed_config_file()) def test_load_pretrained(config): """Check out backbone whether successfully load pretrained model by using `backbone.init_cfg`. Details please refer to `_check_backbone` """ _check_backbone(config, print_cfg=False) def _test_load_pretrained(): """We traversed all potential config files under the `config` file. If you need to print details or debug code, you can use this function. Returns: check_cfg_names (list[str]): Config files that backbone initialized from pretrained checkpoint might be problematic. Need to recheck the config file. The output including the config files that the backbone.init_cfg is None """ check_cfg_names = _traversed_config_file() need_check_cfg = [] prog_bar = ProgressBar(len(check_cfg_names)) for config in check_cfg_names: init_cfg_name = _check_backbone(config) if init_cfg_name is not None: need_check_cfg.append(init_cfg_name) prog_bar.update() print('These config files need to be checked again') print(need_check_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/.dev_scripts/train_benchmark.sh ================================================ echo 'configs/atss/atss_r50_fpn_1x_coco.py' & GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab atss_r50_fpn_1x_coco configs/atss/atss_r50_fpn_1x_coco.py ./tools/work_dir/atss_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & echo 'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py' & GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab autoassign_r50_fpn_8x2_1x_coco configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py ./tools/work_dir/autoassign_r50_fpn_8x2_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & echo 'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' & GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab cascade_mask_rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/cascade_mask_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null & echo 'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py' & GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab crpn_faster_rcnn_r50_caffe_fpn_1x_coco configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py ./tools/work_dir/crpn_faster_rcnn_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 
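Since test_load_pretrained is parametrized over every traversed config file, the whole check can be driven by pytest; a sketch, assuming it is run from the mmdetection root with access to the pretrained checkpoints referenced by the configs:

    pytest .dev_scripts/test_init_backbone.py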
================================================
FILE: DLTA_AI_app/mmdetection/.dev_scripts/train_benchmark.sh
================================================
echo 'configs/atss/atss_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab atss_r50_fpn_1x_coco configs/atss/atss_r50_fpn_1x_coco.py ./tools/work_dir/atss_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab autoassign_r50_fpn_8x2_1x_coco configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py ./tools/work_dir/autoassign_r50_fpn_8x2_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab cascade_mask_rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/cascade_mask_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab crpn_faster_rcnn_r50_caffe_fpn_1x_coco configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py ./tools/work_dir/crpn_faster_rcnn_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab centernet_resnet18_dcnv2_140e_coco configs/centernet/centernet_resnet18_dcnv2_140e_coco.py ./tools/work_dir/centernet_resnet18_dcnv2_140e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py' &
GPUS=16 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab centripetalnet_hourglass104_mstest_16x6_210e_coco configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py ./tools/work_dir/centripetalnet_hourglass104_mstest_16x6_210e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab cornernet_hourglass104_mstest_8x6_210e_coco configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py ./tools/work_dir/cornernet_hourglass104_mstest_8x6_210e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/detectors/detectors_htc_r50_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab detectors_htc_r50_1x_coco configs/detectors/detectors_htc_r50_1x_coco.py ./tools/work_dir/detectors_htc_r50_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py' &
GPUS=16 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab deformable_detr_r50_16x2_50e_coco configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py ./tools/work_dir/deformable_detr_r50_16x2_50e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/detr/detr_r50_8x2_150e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab detr_r50_8x2_150e_coco configs/detr/detr_r50_8x2_150e_coco.py ./tools/work_dir/detr_r50_8x2_150e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab dh_faster_rcnn_r50_fpn_1x_coco configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/dh_faster_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab dynamic_rcnn_r50_fpn_1x_coco configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/dynamic_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_1x_coco configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_caffe_dc5_mstrain_1x_coco configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py ./tools/work_dir/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_caffe_fpn_mstrain_1x_coco configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py ./tools/work_dir/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_caffe_fpn_1x_coco configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py ./tools/work_dir/faster_rcnn_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_ohem_1x_coco configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_ohem_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab fovea_align_r50_fpn_gn-head_4x4_2x_coco configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py ./tools/work_dir/fovea_align_r50_fpn_gn-head_4x4_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_fp16_1x_coco configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_fp16_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_r50_fpn_fp16_1x_coco configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py ./tools/work_dir/retinanet_r50_fpn_fp16_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_free_anchor_r50_fpn_1x_coco configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py ./tools/work_dir/retinanet_free_anchor_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/fsaf/fsaf_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab fsaf_r50_fpn_1x_coco configs/fsaf/fsaf_r50_fpn_1x_coco.py ./tools/work_dir/fsaf_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/gfl/gfl_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab gfl_r50_fpn_1x_coco configs/gfl/gfl_r50_fpn_1x_coco.py ./tools/work_dir/gfl_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_ghm_r50_fpn_1x_coco configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py ./tools/work_dir/retinanet_ghm_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab grid_rcnn_r50_fpn_gn-head_2x_coco configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py ./tools/work_dir/grid_rcnn_r50_fpn_gn-head_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ga_faster_r50_caffe_fpn_1x_coco configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py ./tools/work_dir/ga_faster_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/htc/htc_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab htc_r50_fpn_1x_coco configs/htc/htc_r50_fpn_1x_coco.py ./tools/work_dir/htc_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ld_r18_gflv1_r101_fpn_coco_1x configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py ./tools/work_dir/ld_r18_gflv1_r101_fpn_coco_1x --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab libra_faster_rcnn_r50_fpn_1x_coco configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/libra_faster_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py ./tools/work_dir/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ms_rcnn_r50_caffe_fpn_1x_coco configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py ./tools/work_dir/ms_rcnn_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py' &
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py ./tools/work_dir/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/paa/paa_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab paa_r50_fpn_1x_coco configs/paa/paa_r50_fpn_1x_coco.py ./tools/work_dir/paa_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab pisa_mask_rcnn_r50_fpn_1x_coco configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/pisa_mask_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab point_rend_r50_caffe_fpn_mstrain_1x_coco configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py ./tools/work_dir/point_rend_r50_caffe_fpn_mstrain_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab reppoints_moment_r50_fpn_gn-neck+head_1x_coco configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py ./tools/work_dir/reppoints_moment_r50_fpn_gn-neck+head_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_r50_caffe_fpn_1x_coco configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py ./tools/work_dir/retinanet_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/rpn/rpn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab rpn_r50_fpn_1x_coco configs/rpn/rpn_r50_fpn_1x_coco.py ./tools/work_dir/rpn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab sabl_retinanet_r50_fpn_1x_coco configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py ./tools/work_dir/sabl_retinanet_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/ssd/ssd300_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ssd300_coco configs/ssd/ssd300_coco.py ./tools/work_dir/ssd300_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab tridentnet_r50_caffe_1x_coco configs/tridentnet/tridentnet_r50_caffe_1x_coco.py ./tools/work_dir/tridentnet_r50_caffe_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/vfnet/vfnet_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab vfnet_r50_fpn_1x_coco configs/vfnet/vfnet_r50_fpn_1x_coco.py ./tools/work_dir/vfnet_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/yolact/yolact_r50_8x8_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab yolact_r50_8x8_coco configs/yolact/yolact_r50_8x8_coco.py ./tools/work_dir/yolact_r50_8x8_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/yolo/yolov3_d53_320_273e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab yolov3_d53_320_273e_coco configs/yolo/yolov3_d53_320_273e_coco.py ./tools/work_dir/yolov3_d53_320_273e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab sparse_rcnn_r50_fpn_1x_coco configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/sparse_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/scnet/scnet_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab scnet_r50_fpn_1x_coco configs/scnet/scnet_r50_fpn_1x_coco.py ./tools/work_dir/scnet_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/yolof/yolof_r50_c5_8x8_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab yolof_r50_c5_8x8_1x_coco configs/yolof/yolof_r50_c5_8x8_1x_coco.py ./tools/work_dir/yolof_r50_c5_8x8_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_carafe_1x_coco configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_carafe_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_mdpool_1x_coco configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_mdpool_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_dpool_1x_coco configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_dpool_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_gn-all_2x_coco configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_gn-all_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_gn_ws-all_2x_coco configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_gn_ws-all_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_hrnetv2p_w18_1x_coco configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py ./tools/work_dir/mask_rcnn_hrnetv2p_w18_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_pafpn_1x_coco configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py ./tools/work_dir/faster_rcnn_r50_pafpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_r50_nasfpn_crop640_50e_coco configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py ./tools/work_dir/retinanet_r50_nasfpn_crop640_50e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_regnetx-3.2GF_fpn_1x_coco configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py ./tools/work_dir/mask_rcnn_regnetx-3.2GF_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py ./tools/work_dir/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r2_101_fpn_2x_coco configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py ./tools/work_dir/faster_rcnn_r2_101_fpn_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_groie_1x_coco configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_groie_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_1x_cityscapes configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py ./tools/work_dir/mask_rcnn_r50_fpn_1x_cityscapes --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab panoptic_fpn_r50_fpn_1x_coco configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py ./tools/work_dir/panoptic_fpn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/yolox/yolox_tiny_8x8_300e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab yolox_tiny_8x8_300e_coco configs/yolox/yolox_tiny_8x8_300e_coco.py ./tools/work_dir/yolox_tiny_8x8_300e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
echo 'configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ssdlite_mobilenetv2_scratch_600e_coco configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py ./tools/work_dir/ssdlite_mobilenetv2_scratch_600e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/en/_build/ docs/zh_cn/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ data/ data .vscode .idea .DS_Store # custom *.pkl *.pkl.json *.log.json docs/modelzoo_statistics.md mmdet/.mim work_dirs/ # Pytorch *.pth *.py~ *.sh~ ================================================ FILE: DLTA_AI_app/mmdetection/.owners.yml ================================================ assign: strategy: # random daily-shift-based scedule: "*/1 * * * *" assignees: - Czm369 - hhaAndroid - zytx121 - RangiLyu - BIGWangYuDong - chhluo - ZwwWayne ================================================ FILE: DLTA_AI_app/mmdetection/.pre-commit-config.yaml ================================================ repos: - repo: https://github.com/PyCQA/flake8 rev: 5.0.4 hooks: - id: flake8 - repo: https://github.com/PyCQA/isort rev: 5.10.1 hooks: - id: isort - repo: https://github.com/pre-commit/mirrors-yapf rev: v0.32.0 hooks: - id: yapf - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.3.0 hooks: - id: trailing-whitespace - id: check-yaml - id: end-of-file-fixer - id: requirements-txt-fixer - id: double-quote-string-fixer - id: check-merge-conflict - id: fix-encoding-pragma args: ["--remove"] - id: mixed-line-ending args: ["--fix=lf"] - repo: https://github.com/codespell-project/codespell rev: v2.2.1 hooks: - id: codespell - repo: https://github.com/executablebooks/mdformat rev: 0.7.9 hooks: - id: mdformat args: ["--number"] additional_dependencies: - mdformat-openmmlab - mdformat_frontmatter - linkify-it-py - repo: https://github.com/myint/docformatter rev: v1.3.1 hooks: - id: docformatter args: ["--in-place", "--wrap-descriptions", "79"] - repo: https://github.com/open-mmlab/pre-commit-hooks rev: v0.2.0 # Use the ref you want to point at hooks: - id: check-algo-readme - id: check-copyright args: ["mmdet"] # replace the dir_to_check with your expected directory to check ================================================ FILE: DLTA_AI_app/mmdetection/.readthedocs.yml ================================================ version: 2 formats: all python: version: 3.7 install: - requirements: requirements/docs.txt - requirements: requirements/readthedocs.txt ================================================ FILE: DLTA_AI_app/mmdetection/CITATION.cff ================================================ cff-version: 1.2.0 message: "If you use this software, please cite it as below." authors: - name: "MMDetection Contributors" title: "OpenMMLab Detection Toolbox and Benchmark" date-released: 2018-08-22 url: "https://github.com/open-mmlab/mmdetection" license: Apache-2.0 ================================================ FILE: DLTA_AI_app/mmdetection/LICENSE ================================================ Copyright 2018-2023 OpenMMLab. 
All rights reserved. Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2018-2023 OpenMMLab. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
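For context (an editorial aside, not a file from the repository): the benchmark commands above all pass --cfg-options checkpoint_config.max_keep_ckpts=1, and the configs/_base_/ fragments that follow are merged into the top-level configs through mmcv's Config system. A minimal Python sketch of both mechanisms, assuming mmcv is installed and the working directory is DLTA_AI_app/mmdetection:

from mmcv import Config

# Top-level configs list their _base_ fragments (dataset, model, schedule,
# runtime); Config.fromfile resolves and merges them into one dict-like object.
cfg = Config.fromfile('configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py')

# The --cfg-options key=value pairs used by the benchmark commands map to
# merge_from_dict with dotted keys.
cfg.merge_from_dict({'checkpoint_config.max_keep_ckpts': 1})
print(cfg.checkpoint_config.max_keep_ckpts)  # 1: only the latest checkpoint is kept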
================================================ FILE: DLTA_AI_app/mmdetection/MANIFEST.in ================================================ include requirements/*.txt include mmdet/VERSION include mmdet/.mim/model-index.yml include mmdet/.mim/demo/*/* recursive-include mmdet/.mim/configs *.py *.yml recursive-include mmdet/.mim/tools *.sh *.py ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/datasets/cityscapes_detection.py ================================================ # dataset settings dataset_type = 'CityscapesDataset' data_root = 'data/cityscapes/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(2048, 1024), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=1, workers_per_gpu=2, train=dict( type='RepeatDataset', times=8, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_train.json', img_prefix=data_root + 'leftImg8bit/train/', pipeline=train_pipeline)), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json', img_prefix=data_root + 'leftImg8bit/val/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_test.json', img_prefix=data_root + 'leftImg8bit/test/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric='bbox') ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/datasets/cityscapes_instance.py ================================================ # dataset settings dataset_type = 'CityscapesDataset' data_root = 'data/cityscapes/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(2048, 1024), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=1, workers_per_gpu=2, train=dict( type='RepeatDataset', times=8, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_train.json', img_prefix=data_root + 'leftImg8bit/train/', pipeline=train_pipeline)), val=dict( type=dataset_type, 
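# (Editorial annotation, not part of the original config: the RepeatDataset
# wrapper with times=8 above repeats the roughly 3k-image Cityscapes train
# split, giving longer epochs and fewer dataloader restarts per schedule.)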
ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json', img_prefix=data_root + 'leftImg8bit/val/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_test.json', img_prefix=data_root + 'leftImg8bit/test/', pipeline=test_pipeline)) evaluation = dict(metric=['bbox', 'segm']) ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/datasets/coco_detection.py ================================================ # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric='bbox') ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/datasets/coco_instance.py ================================================ # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', 
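# (Editorial annotation, not part of the original config: samples_per_gpu=2
# above is per GPU; with the 8-GPU setup these configs assume, the effective
# batch size is 16, matching base_batch_size in default_runtime.py below.)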
pipeline=test_pipeline)) evaluation = dict(metric=['bbox', 'segm']) ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/datasets/coco_instance_semantic.py ================================================ # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='SegRescale', scale_factor=1 / 8), dict(type='DefaultFormatBundle'), dict( type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', seg_prefix=data_root + 'stuffthingmaps/train2017/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) evaluation = dict(metric=['bbox', 'segm']) ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/datasets/coco_panoptic.py ================================================ # dataset settings dataset_type = 'CocoPanopticDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='LoadPanopticAnnotations', with_bbox=True, with_mask=True, with_seg=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='SegRescale', scale_factor=1 / 4), dict(type='DefaultFormatBundle'), dict( type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 'annotations/panoptic_train2017.json', img_prefix=data_root + 'train2017/', seg_prefix=data_root + 'annotations/panoptic_train2017/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/panoptic_val2017.json', img_prefix=data_root + 'val2017/', seg_prefix=data_root + 'annotations/panoptic_val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, 
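# (Editorial annotation, not part of the original config: unlike the
# instance-segmentation datasets above, panoptic evaluation below reports the
# PQ (panoptic quality) metric rather than bbox/segm mAP.)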
ann_file=data_root + 'annotations/panoptic_val2017.json', img_prefix=data_root + 'val2017/', seg_prefix=data_root + 'annotations/panoptic_val2017/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric=['PQ']) ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/datasets/deepfashion.py ================================================ # dataset settings dataset_type = 'DeepFashionDataset' data_root = 'data/DeepFashion/In-shop/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='Resize', img_scale=(750, 1101), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(750, 1101), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( imgs_per_gpu=2, workers_per_gpu=1, train=dict( type=dataset_type, ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json', img_prefix=data_root + 'Img/', pipeline=train_pipeline, data_root=data_root), val=dict( type=dataset_type, ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json', img_prefix=data_root + 'Img/', pipeline=test_pipeline, data_root=data_root), test=dict( type=dataset_type, ann_file=data_root + 'annotations/DeepFashion_segmentation_gallery.json', img_prefix=data_root + 'Img/', pipeline=test_pipeline, data_root=data_root)) evaluation = dict(interval=5, metric=['bbox', 'segm']) ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/datasets/lvis_v0.5_instance.py ================================================ # dataset settings _base_ = 'coco_instance.py' dataset_type = 'LVISV05Dataset' data_root = 'data/lvis_v0.5/' data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( _delete_=True, type='ClassBalancedDataset', oversample_thr=1e-3, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/lvis_v0.5_train.json', img_prefix=data_root + 'train2017/')), val=dict( type=dataset_type, ann_file=data_root + 'annotations/lvis_v0.5_val.json', img_prefix=data_root + 'val2017/'), test=dict( type=dataset_type, ann_file=data_root + 'annotations/lvis_v0.5_val.json', img_prefix=data_root + 'val2017/')) evaluation = dict(metric=['bbox', 'segm']) ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/datasets/lvis_v1_instance.py ================================================ # dataset settings _base_ = 'coco_instance.py' dataset_type = 'LVISV1Dataset' data_root = 'data/lvis_v1/' data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( _delete_=True, type='ClassBalancedDataset', oversample_thr=1e-3, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/lvis_v1_train.json', img_prefix=data_root)), val=dict( type=dataset_type, ann_file=data_root + 'annotations/lvis_v1_val.json', img_prefix=data_root), test=dict( type=dataset_type, ann_file=data_root + 'annotations/lvis_v1_val.json', img_prefix=data_root)) evaluation = 
dict(metric=['bbox', 'segm']) ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/datasets/openimages_detection.py ================================================ # dataset settings dataset_type = 'OpenImagesDataset' data_root = 'data/OpenImages/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, denorm_bbox=True), dict(type='Resize', img_scale=(1024, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1024, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ], ), ] data = dict( samples_per_gpu=2, workers_per_gpu=0, # workers_per_gpu > 0 may occur out of memory train=dict( type=dataset_type, ann_file=data_root + 'annotations/oidv6-train-annotations-bbox.csv', img_prefix=data_root + 'OpenImages/train/', label_file=data_root + 'annotations/class-descriptions-boxable.csv', hierarchy_file=data_root + 'annotations/bbox_labels_600_hierarchy.json', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/validation-annotations-bbox.csv', img_prefix=data_root + 'OpenImages/validation/', label_file=data_root + 'annotations/class-descriptions-boxable.csv', hierarchy_file=data_root + 'annotations/bbox_labels_600_hierarchy.json', meta_file=data_root + 'annotations/validation-image-metas.pkl', image_level_ann_file=data_root + 'annotations/validation-annotations-human-imagelabels-boxable.csv', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/validation-annotations-bbox.csv', img_prefix=data_root + 'OpenImages/validation/', label_file=data_root + 'annotations/class-descriptions-boxable.csv', hierarchy_file=data_root + 'annotations/bbox_labels_600_hierarchy.json', meta_file=data_root + 'annotations/validation-image-metas.pkl', image_level_ann_file=data_root + 'annotations/validation-annotations-human-imagelabels-boxable.csv', pipeline=test_pipeline)) evaluation = dict(interval=1, metric='mAP') ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/datasets/voc0712.py ================================================ # dataset settings dataset_type = 'VOCDataset' data_root = 'data/VOCdevkit/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1000, 600), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1000, 600), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', 
keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type='RepeatDataset', times=3, dataset=dict( type=dataset_type, ann_file=[ data_root + 'VOC2007/ImageSets/Main/trainval.txt', data_root + 'VOC2012/ImageSets/Main/trainval.txt' ], img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'], pipeline=train_pipeline)), val=dict( type=dataset_type, ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', img_prefix=data_root + 'VOC2007/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', img_prefix=data_root + 'VOC2007/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric='mAP') ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/datasets/wider_face.py ================================================ # dataset settings dataset_type = 'WIDERFaceDataset' data_root = 'data/WIDERFace/' img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict(type='LoadAnnotations', with_bbox=True), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict( type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(300, 300), keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='RandomFlip', flip_ratio=0.5), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(300, 300), flip=False, transforms=[ dict(type='Resize', keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=60, workers_per_gpu=2, train=dict( type='RepeatDataset', times=2, dataset=dict( type=dataset_type, ann_file=data_root + 'train.txt', img_prefix=data_root + 'WIDER_train/', min_size=17, pipeline=train_pipeline)), val=dict( type=dataset_type, ann_file=data_root + 'val.txt', img_prefix=data_root + 'WIDER_val/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'val.txt', img_prefix=data_root + 'WIDER_val/', pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/default_runtime.py ================================================ checkpoint_config = dict(interval=1) # yapf:disable log_config = dict( interval=50, hooks=[ dict(type='TextLoggerHook'), # dict(type='TensorboardLoggerHook') ]) # yapf:enable custom_hooks = [dict(type='NumClassCheckHook')] dist_params = dict(backend='nccl') log_level = 'INFO' load_from = None resume_from = None workflow = [('train', 1)] # disable opencv multithreading to avoid system being overloaded opencv_num_threads = 0 # set multi-process start method as `fork` to speed up the training mp_start_method = 'fork' # Default setting for scaling LR automatically # - `enable` means enable scaling LR automatically # or not by default. # - `base_batch_size` = (8 GPUs) x (2 samples per GPU). 
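# (Editorial annotation, not part of the original file: as a worked example,
# with enable=True and a real batch of 4 GPUs x 2 samples per GPU = 8, the
# configured LR would be scaled by 8 / 16 = 0.5.)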
auto_scale_lr = dict(enable=False, base_batch_size=16) ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py ================================================ # model settings model = dict( type='CascadeRCNN', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), rpn_head=dict( type='RPNHead', in_channels=256, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), roi_head=dict( type='CascadeRoIHead', num_stages=3, stage_loss_weights=[1, 0.5, 0.25], bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=[ dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.05, 0.05, 0.1, 0.1]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.033, 0.033, 0.067, 0.067]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) ], mask_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), mask_head=dict( type='FCNMaskHead', num_convs=4, in_channels=256, conv_out_channels=256, num_classes=80, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), # model training and testing settings train_cfg=dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=0, pos_weight=-1, debug=False), rpn_proposal=dict( nms_pre=2000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=[ dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), 
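# (Editorial annotation, not part of the original config: the three rcnn
# stage dicts in this list raise the assigner IoU thresholds step by step,
# 0.5 -> 0.6 -> 0.7, which is the defining Cascade R-CNN training scheme.)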
mask_size=28, pos_weight=-1, debug=False), dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), mask_size=28, pos_weight=-1, debug=False), dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.7, min_pos_iou=0.7, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), mask_size=28, pos_weight=-1, debug=False) ]), test_cfg=dict( rpn=dict( nms_pre=1000, max_per_img=1000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100, mask_thr_binary=0.5))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/models/cascade_rcnn_r50_fpn.py ================================================ # model settings model = dict( type='CascadeRCNN', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), rpn_head=dict( type='RPNHead', in_channels=256, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), roi_head=dict( type='CascadeRoIHead', num_stages=3, stage_loss_weights=[1, 0.5, 0.25], bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=[ dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.05, 0.05, 0.1, 0.1]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.033, 0.033, 0.067, 0.067]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) ]), # model training and testing settings train_cfg=dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, 
add_gt_as_proposals=False), allowed_border=0, pos_weight=-1, debug=False), rpn_proposal=dict( nms_pre=2000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=[ dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), pos_weight=-1, debug=False), dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), pos_weight=-1, debug=False), dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.7, min_pos_iou=0.7, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), pos_weight=-1, debug=False) ]), test_cfg=dict( rpn=dict( nms_pre=1000, max_per_img=1000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/models/fast_rcnn_r50_fpn.py ================================================ # model settings model = dict( type='FastRCNN', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), roi_head=dict( type='StandardRoIHead', bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0))), # model training and testing settings train_cfg=dict( rcnn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), pos_weight=-1, debug=False)), test_cfg=dict( rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/models/faster_rcnn_r50_caffe_c4.py ================================================ # model settings norm_cfg = dict(type='BN', requires_grad=False) model = dict( type='FasterRCNN', backbone=dict( type='ResNet', depth=50, num_stages=3, strides=(1, 2, 2), dilations=(1, 1, 1), out_indices=(2, ), frozen_stages=1, norm_cfg=norm_cfg, norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe')), rpn_head=dict( type='RPNHead', in_channels=1024, feat_channels=1024, anchor_generator=dict( type='AnchorGenerator', scales=[2, 4, 8, 16, 32], ratios=[0.5, 1.0, 2.0], strides=[16]), bbox_coder=dict( 
type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), roi_head=dict( type='StandardRoIHead', shared_head=dict( type='ResLayer', depth=50, stage=3, stride=2, dilation=1, style='caffe', norm_cfg=norm_cfg, norm_eval=True, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe')), bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), out_channels=1024, featmap_strides=[16]), bbox_head=dict( type='BBoxHead', with_avg_pool=True, roi_feat_size=7, in_channels=2048, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0))), # model training and testing settings train_cfg=dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=-1, pos_weight=-1, debug=False), rpn_proposal=dict( nms_pre=12000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), pos_weight=-1, debug=False)), test_cfg=dict( rpn=dict( nms_pre=6000, max_per_img=1000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py ================================================ # model settings norm_cfg = dict(type='BN', requires_grad=False) model = dict( type='FasterRCNN', backbone=dict( type='ResNet', depth=50, num_stages=4, strides=(1, 2, 2, 1), dilations=(1, 1, 1, 2), out_indices=(3, ), frozen_stages=1, norm_cfg=norm_cfg, norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe')), rpn_head=dict( type='RPNHead', in_channels=2048, feat_channels=2048, anchor_generator=dict( type='AnchorGenerator', scales=[2, 4, 8, 16, 32], ratios=[0.5, 1.0, 2.0], strides=[16]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), roi_head=dict( type='StandardRoIHead', bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), out_channels=2048, featmap_strides=[16]), bbox_head=dict( type='Shared2FCBBoxHead', in_channels=2048, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0))), # model training and testing settings train_cfg=dict( rpn=dict( 
assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=0, pos_weight=-1, debug=False), rpn_proposal=dict( nms_pre=12000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), pos_weight=-1, debug=False)), test_cfg=dict( rpn=dict( nms=dict(type='nms', iou_threshold=0.7), nms_pre=6000, max_per_img=1000, min_bbox_size=0), rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/models/faster_rcnn_r50_fpn.py ================================================ # model settings model = dict( type='FasterRCNN', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), rpn_head=dict( type='RPNHead', in_channels=256, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), roi_head=dict( type='StandardRoIHead', bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0))), # model training and testing settings train_cfg=dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=-1, pos_weight=-1, debug=False), rpn_proposal=dict( nms_pre=2000, max_per_img=1000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), pos_weight=-1, debug=False)), test_cfg=dict( rpn=dict( nms_pre=1000, max_per_img=1000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100) # soft-nms is also supported for rcnn testing # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) )) ================================================ 
FILE: DLTA_AI_app/mmdetection/configs/_base_/models/mask_rcnn_r50_caffe_c4.py ================================================ # model settings norm_cfg = dict(type='BN', requires_grad=False) model = dict( type='MaskRCNN', backbone=dict( type='ResNet', depth=50, num_stages=3, strides=(1, 2, 2), dilations=(1, 1, 1), out_indices=(2, ), frozen_stages=1, norm_cfg=norm_cfg, norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe')), rpn_head=dict( type='RPNHead', in_channels=1024, feat_channels=1024, anchor_generator=dict( type='AnchorGenerator', scales=[2, 4, 8, 16, 32], ratios=[0.5, 1.0, 2.0], strides=[16]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), roi_head=dict( type='StandardRoIHead', shared_head=dict( type='ResLayer', depth=50, stage=3, stride=2, dilation=1, style='caffe', norm_cfg=norm_cfg, norm_eval=True), bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), out_channels=1024, featmap_strides=[16]), bbox_head=dict( type='BBoxHead', with_avg_pool=True, roi_feat_size=7, in_channels=2048, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=1.0)), mask_roi_extractor=None, mask_head=dict( type='FCNMaskHead', num_convs=0, in_channels=2048, conv_out_channels=256, num_classes=80, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), # model training and testing settings train_cfg=dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=0, pos_weight=-1, debug=False), rpn_proposal=dict( nms_pre=12000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), mask_size=14, pos_weight=-1, debug=False)), test_cfg=dict( rpn=dict( nms_pre=6000, nms=dict(type='nms', iou_threshold=0.7), max_per_img=1000, min_bbox_size=0), rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100, mask_thr_binary=0.5))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/_base_/models/mask_rcnn_r50_fpn.py ================================================ # model settings model = dict( type='MaskRCNN', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), rpn_head=dict( type='RPNHead', in_channels=256, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), bbox_coder=dict( 
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        mask_head=dict(
            type='FCNMaskHead',
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=80,
            loss_mask=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            mask_size=28,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)))
================================================
FILE: DLTA_AI_app/mmdetection/configs/_base_/models/retinanet_r50_fpn.py
================================================
# model settings
model = dict(
    type='RetinaNet',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5),
    bbox_head=dict(
        type='RetinaHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.5),
        max_per_img=100))
================================================
FILE: DLTA_AI_app/mmdetection/configs/_base_/models/rpn_r50_caffe_c4.py
================================================
# model settings
model = dict(
    type='RPN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=3,
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        out_indices=(2, ),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    neck=None,
    rpn_head=dict(
        type='RPNHead',
        in_channels=1024,
        feat_channels=1024,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[2, 4, 8, 16, 32],
            ratios=[0.5, 1.0, 2.0],
            strides=[16]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=12000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)))
================================================
FILE: DLTA_AI_app/mmdetection/configs/_base_/models/rpn_r50_fpn.py
================================================
# model settings
model = dict(
    type='RPN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=2000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)))
================================================
FILE: DLTA_AI_app/mmdetection/configs/_base_/models/ssd300.py
================================================
# model settings
input_size = 300
model = dict(
    type='SingleStageDetector',
    backbone=dict(
        type='SSDVGG',
        depth=16,
        with_last_pool=False,
        ceil_mode=True,
        out_indices=(3, 4),
        out_feature_indices=(22, 34),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://vgg16_caffe')),
    neck=dict(
        type='SSDNeck',
        in_channels=(512, 1024),
        out_channels=(512, 1024, 512, 256, 256, 256),
        level_strides=(2, 2, 1, 1),
        level_paddings=(1, 1, 0, 0),
        l2_norm_scale=20),
    bbox_head=dict(
        type='SSDHead',
        in_channels=(512, 1024, 512, 256, 256, 256),
        num_classes=80,
        anchor_generator=dict(
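            # NOTE (illustrative gloss, not upstream documentation): from
            # basesize_ratio_range=(0.15, 0.9), the SSDAnchorGenerator derives
            # per-level base box sizes as linearly spaced fractions of
            # input_size=300, following the SSD paper's convention. For this
            # setup the commonly quoted result is
            # min_sizes=[21, 45, 99, 153, 207, 261] and
            # max_sizes=[45, 99, 153, 207, 261, 315].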
            type='SSDAnchorGenerator',
            scale_major=False,
            input_size=input_size,
            basesize_ratio_range=(0.15, 0.9),
            strides=[8, 16, 32, 64, 100, 300],
            ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2])),
    # model training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.,
            ignore_iof_thr=-1,
            gt_max_assign_all=False),
        smoothl1_beta=1.,
        allowed_border=-1,
        pos_weight=-1,
        neg_pos_ratio=3,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        nms=dict(type='nms', iou_threshold=0.45),
        min_bbox_size=0,
        score_thr=0.02,
        max_per_img=200))
cudnn_benchmark = True
================================================
FILE: DLTA_AI_app/mmdetection/configs/_base_/schedules/schedule_1x.py
================================================
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
================================================
FILE: DLTA_AI_app/mmdetection/configs/_base_/schedules/schedule_20e.py
================================================
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
================================================
FILE: DLTA_AI_app/mmdetection/configs/_base_/schedules/schedule_2x.py
================================================
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
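All three schedules share one shape: a linear warmup from base_lr * warmup_ratio up to the base lr over the first 500 iterations, then a 10x decay at each listed epoch. A plain-Python sketch of that rule (illustrative names, not mmcv's actual hook implementation):

# Sketch of the 'step' LR policy with linear warmup used by the schedules above.
def lr_at(epoch, global_iter, base_lr=0.02, warmup_iters=500,
          warmup_ratio=0.001, steps=(8, 11), gamma=0.1):
    if global_iter < warmup_iters:
        # linear ramp from base_lr * warmup_ratio to base_lr
        k = global_iter / warmup_iters
        return base_lr * (warmup_ratio * (1 - k) + k)
    drops = sum(epoch >= s for s in steps)  # one 10x drop per passed step
    return base_lr * gamma ** drops

# schedule_1x: lr is 0.02 for epochs 0-7, 0.002 for 8-10, 0.0002 for epoch 11.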
================================================
FILE: DLTA_AI_app/mmdetection/configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
albu_train_transforms = [
    dict(
        type='ShiftScaleRotate',
        shift_limit=0.0625,
        scale_limit=0.0,
        rotate_limit=0,
        interpolation=1,
        p=0.5),
    dict(
        type='RandomBrightnessContrast',
        brightness_limit=[0.1, 0.3],
        contrast_limit=[0.1, 0.3],
        p=0.2),
    dict(
        type='OneOf',
        transforms=[
            dict(
                type='RGBShift',
                r_shift_limit=10,
                g_shift_limit=10,
                b_shift_limit=10,
                p=1.0),
            dict(
                type='HueSaturationValue',
                hue_shift_limit=20,
                sat_shift_limit=30,
                val_shift_limit=20,
                p=1.0)
        ],
        p=0.1),
    dict(type='JpegCompression', quality_lower=85, quality_upper=95, p=0.2),
    dict(type='ChannelShuffle', p=0.1),
    dict(
        type='OneOf',
        transforms=[
            dict(type='Blur', blur_limit=3, p=1.0),
            dict(type='MedianBlur', blur_limit=3, p=1.0)
        ],
        p=0.1),
]
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='Pad', size_divisor=32),
    dict(
        type='Albu',
        transforms=albu_train_transforms,
        bbox_params=dict(
            type='BboxParams',
            format='pascal_voc',
            label_fields=['gt_labels'],
            min_visibility=0.0,
            filter_lost_elements=True),
        keymap={
            'img': 'image',
            'gt_masks': 'masks',
            'gt_bboxes': 'bboxes'
        },
        update_pad_shape=False,
        skip_img_without_anno=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='DefaultFormatBundle'),
    dict(
        type='Collect',
        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'],
        meta_keys=('filename', 'ori_shape', 'img_shape', 'img_norm_cfg',
                   'pad_shape', 'scale_factor'))
]
data = dict(train=dict(pipeline=train_pipeline))
================================================
FILE: DLTA_AI_app/mmdetection/configs/atss/atss_r101_fpn_1x_coco.py
================================================
_base_ = './atss_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/atss/atss_r50_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='ATSS',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='ATSSHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
================================================
FILE: DLTA_AI_app/mmdetection/configs/atss/metafile.yml
================================================
Collections:
  - Name: ATSS
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - ATSS
        - FPN
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/1912.02424
      Title: 'Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection'
    README: configs/atss/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/atss.py#L6
      Version: v2.0.0

Models:
  - Name: atss_r50_fpn_1x_coco
    In Collection: ATSS
    Config: configs/atss/atss_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 3.7
      inference time (ms/im):
        - value: 50.76
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth
  - Name: atss_r101_fpn_1x_coco
    In Collection: ATSS
    Config: configs/atss/atss_r101_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 5.6
      inference time (ms/im):
        - value: 81.3
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.pth
================================================
FILE: DLTA_AI_app/mmdetection/configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py
================================================
# We follow the original implementation which
# adopts the Caffe pre-trained backbone.
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='AutoAssign',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs=True,
        num_outs=5,
        relu_before_extra_convs=True,
        init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')),
    bbox_head=dict(
        type='AutoAssignHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        loss_bbox=dict(type='GIoULoss', loss_weight=5.0)),
    train_cfg=None,
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
img_norm_cfg = dict(
    mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(lr=0.01, paramwise_cfg=dict(norm_decay_mult=0.))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=1.0 / 1000,
    step=[8, 11])
total_epochs = 12
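Two normalization conventions recur in these configs: torchvision-style weights expect RGB input scaled by per-channel mean and std, while Caffe/detectron2-style weights (as above) expect BGR input with mean subtraction only (std of 1.0). A small NumPy illustration of what the `Normalize` step computes under each convention (illustrative only):

import numpy as np

def normalize(img_bgr, mean, std, to_rgb):
    # Mirrors the Normalize transform: optional BGR->RGB, then (img - mean) / std.
    img = img_bgr[..., ::-1] if to_rgb else img_bgr
    return (img.astype(np.float32) - mean) / std

img = np.zeros((800, 1333, 3), np.uint8)  # dummy BGR frame
caffe_style = normalize(img, np.array([102.9801, 115.9465, 122.7717]),
                        np.array([1.0, 1.0, 1.0]), to_rgb=False)
torchvision_style = normalize(img, np.array([123.675, 116.28, 103.53]),
                              np.array([58.395, 57.12, 57.375]), to_rgb=True)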
================================================
FILE: DLTA_AI_app/mmdetection/configs/autoassign/metafile.yml
================================================
Collections:
  - Name: AutoAssign
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - AutoAssign
        - FPN
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/2007.03496
      Title: 'AutoAssign: Differentiable Label Assignment for Dense Object Detection'
    README: configs/autoassign/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/detectors/autoassign.py#L6
      Version: v2.12.0

Models:
  - Name: autoassign_r50_fpn_8x2_1x_coco
    In Collection: AutoAssign
    Config: configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py
    Metadata:
      Training Memory (GB): 4.08
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/autoassign/auto_assign_r50_fpn_1x_coco/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth
================================================
FILE: DLTA_AI_app/mmdetection/configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py
================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    neck=dict(
        type='FPN_CARAFE',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5,
        start_level=0,
        end_level=-1,
        norm_cfg=None,
        act_cfg=None,
        order=('conv', 'norm', 'act'),
        upsample_cfg=dict(
            type='carafe',
            up_kernel=5,
            up_group=1,
            encoder_kernel=3,
            encoder_dilation=1,
            compressed_channels=64)))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=64),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=64),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
================================================
FILE: DLTA_AI_app/mmdetection/configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    neck=dict(
        type='FPN_CARAFE',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5,
        start_level=0,
        end_level=-1,
        norm_cfg=None,
        act_cfg=None,
        order=('conv', 'norm', 'act'),
        upsample_cfg=dict(
            type='carafe',
            up_kernel=5,
            up_group=1,
            encoder_kernel=3,
            encoder_dilation=1,
            compressed_channels=64)),
    roi_head=dict(
        mask_head=dict(
            upsample_cfg=dict(
                type='carafe',
                scale_factor=2,
                up_kernel=5,
                up_group=1,
                encoder_kernel=3,
                encoder_dilation=1,
                compressed_channels=64))))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=64),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=64),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
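The two CARAFE configs override only the keys they change: child dicts merge recursively into the `_base_` config, and a child can set `_delete_=True` inside a dict to replace the base dict outright (the Cascade RPN configs below use this). A toy illustration of those merge semantics (plain Python, not mmcv's actual implementation):

# Toy illustration of recursive config merging with _delete_ semantics.
def merge(base, child):
    child = dict(child)                   # don't mutate the caller's dict
    if child.pop('_delete_', False):      # replace the base dict wholesale
        return child
    out = dict(base)
    for k, v in child.items():
        if isinstance(v, dict) and isinstance(out.get(k), dict):
            out[k] = merge(out[k], v)     # recurse: merge key-by-key
        else:
            out[k] = v
    return out

base = dict(neck=dict(type='FPN', out_channels=256, num_outs=5))
child = dict(neck=dict(type='FPN_CARAFE'))       # only the type changes
assert merge(base, child)['neck']['num_outs'] == 5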
================================================
FILE: DLTA_AI_app/mmdetection/configs/carafe/metafile.yml
================================================
Collections:
  - Name: CARAFE
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RPN
        - FPN_CARAFE
        - ResNet
        - RoIPool
    Paper:
      URL: https://arxiv.org/abs/1905.02188
      Title: 'CARAFE: Content-Aware ReAssembly of FEatures'
    README: configs/carafe/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/necks/fpn_carafe.py#L11
      Version: v2.12.0

Models:
  - Name: faster_rcnn_r50_fpn_carafe_1x_coco
    In Collection: CARAFE
    Config: configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py
    Metadata:
      Training Memory (GB): 4.26
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.6
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth
  - Name: mask_rcnn_r50_fpn_carafe_1x_coco
    In Collection: CARAFE
    Config: configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py
    Metadata:
      Training Memory (GB): 4.31
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.3
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 35.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/carafe/mask_rcnn_r50_fpn_carafe_1x_coco/mask_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.393__segm_mAP-0.358_20200503_135957-8687f195.pth
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py
================================================
_base_ = './cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco.py
================================================
_base_ = './cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py
================================================
_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py
================================================
_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py
================================================
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py
================================================
_base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py']
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py
================================================
_base_ = ['./cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py']
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(dataset=dict(pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/models/cascade_mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py
================================================
_base_ = [
    '../_base_/models/cascade_mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_20e.py', '../_base_/default_runtime.py'
]
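The two `*_fpn_1x`/`*_fpn_20e` files above are pure composition: model, dataset, schedule, and runtime are each pulled in from `_base_` and merged in order. A downstream config can extend them the same way; a minimal sketch (hypothetical file, not part of the repo):

# my_cascade_custom.py -- hypothetical user config, for illustration only.
_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
# Dicts merge key-by-key into the base, so only the overridden keys are listed.
optimizer = dict(lr=0.01)          # halve the base SGD lr of 0.02
lr_config = dict(step=[16, 22])    # decay later ...
runner = dict(max_epochs=24)       # ... over a 2x-style schedule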
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py
================================================
_base_ = [
    '../common/mstrain_3x_coco_instance.py',
    '../_base_/models/cascade_mask_rcnn_r50_fpn.py'
]
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py
================================================
_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py
================================================
_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py
================================================
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py
================================================
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=8,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675],
    std=[57.375, 57.120, 58.395],
    to_rgb=False)
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(dataset=dict(pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py
================================================
_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py
================================================
_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py
================================================
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py
================================================
_base_ = './cascade_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py
================================================
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
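Any of these configs can be paired with the checkpoint listed under its metafile `Weights` entry for quick inference through mmdet's high-level API. A minimal sketch (mmdet 2.x; the image path and locally downloaded checkpoint file are placeholders):

# Minimal inference sketch using mmdet's high-level API (mmdet 2.x).
from mmdet.apis import init_detector, inference_detector

config = 'DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py'
checkpoint = 'cascade_rcnn_r101_fpn_1x_coco_20200317-0b6a2fbf.pth'  # from the metafile Weights URL
model = init_detector(config, checkpoint, device='cpu')
result = inference_detector(model, 'demo.jpg')  # per-class arrays of [x1, y1, x2, y2, score]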
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py
================================================
_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py
================================================
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/models/cascade_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py
================================================
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py
================================================
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py
================================================
_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py
================================================
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
    type='CascadeRCNN',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py
================================================
_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'
model = dict(
    type='CascadeRCNN',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rcnn/metafile.yml
================================================
Collections:
  - Name: Cascade R-CNN
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - Cascade R-CNN
        - FPN
        - RPN
        - ResNet
        - RoIAlign
    Paper:
      URL: http://dx.doi.org/10.1109/tpami.2019.2956516
      Title: 'Cascade R-CNN: Delving into High Quality Object Detection'
    README: configs/cascade_rcnn/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/cascade_rcnn.py#L6
      Version: v2.0.0
  - Name: Cascade Mask R-CNN
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - Cascade R-CNN
        - FPN
        - RPN
        - ResNet
        - RoIAlign
    Paper:
      URL: http://dx.doi.org/10.1109/tpami.2019.2956516
      Title: 'Cascade R-CNN: Delving into High Quality Object Detection'
    README: configs/cascade_rcnn/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/cascade_rcnn.py#L6
      Version: v2.0.0

Models:
  - Name: cascade_rcnn_r50_caffe_fpn_1x_coco
    In Collection: Cascade R-CNN
    Config: configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.2
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco/cascade_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.404_20200504_174853-b857be87.pth
  - Name: cascade_rcnn_r50_fpn_1x_coco
    In Collection: Cascade R-CNN
    Config: configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.4
      inference time (ms/im):
        - value: 62.11
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth
  - Name: cascade_rcnn_r50_fpn_20e_coco
    In Collection: Cascade R-CNN
    Config: configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py
    Metadata:
      Training Memory (GB): 4.4
      inference time (ms/im):
        - value: 62.11
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 20
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco/cascade_rcnn_r50_fpn_20e_coco_bbox_mAP-0.41_20200504_175131-e9872a90.pth
  - Name: cascade_rcnn_r101_caffe_fpn_1x_coco
    In Collection: Cascade R-CNN
    Config: configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 6.2
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco/cascade_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.423_20200504_175649-cab8dbd5.pth
  - Name: cascade_rcnn_r101_fpn_1x_coco
    In Collection: Cascade R-CNN
    Config: configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 6.4
      inference time (ms/im):
        - value: 74.07
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco/cascade_rcnn_r101_fpn_1x_coco_20200317-0b6a2fbf.pth
  - Name: cascade_rcnn_r101_fpn_20e_coco
    In Collection: Cascade R-CNN
    Config: configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py
    Metadata:
      Training Memory (GB): 6.4
      inference time (ms/im):
        - value: 74.07
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 20
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco/cascade_rcnn_r101_fpn_20e_coco_bbox_mAP-0.425_20200504_231812-5057dcc5.pth
  - Name: cascade_rcnn_x101_32x4d_fpn_1x_coco
    In Collection: Cascade R-CNN
    Config: configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 7.6
      inference time (ms/im):
        - value: 91.74
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco/cascade_rcnn_x101_32x4d_fpn_1x_coco_20200316-95c2deb6.pth
  - Name: cascade_rcnn_x101_32x4d_fpn_20e_coco
    In Collection: Cascade R-CNN
    Config: configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py
    Metadata:
      Training Memory (GB): 7.6
      Epochs: 20
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco/cascade_rcnn_x101_32x4d_fpn_20e_coco_20200906_134608-9ae0a720.pth
  - Name: cascade_rcnn_x101_64x4d_fpn_1x_coco
    In Collection: Cascade R-CNN
    Config: configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 10.7
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_075702-43ce6a30.pth
  - Name: cascade_rcnn_x101_64x4d_fpn_20e_coco
    In Collection: Cascade R-CNN
    Config: configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py
    Metadata:
      Training Memory (GB): 10.7
      Epochs: 20
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth
  - Name: cascade_mask_rcnn_r50_caffe_fpn_1x_coco
    In Collection: Cascade Mask R-CNN
    Config: configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 5.9
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco/cascade_mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.412__segm_mAP-0.36_20200504_174659-5004b251.pth
  - Name: cascade_mask_rcnn_r50_fpn_1x_coco
    In Collection: Cascade Mask R-CNN
    Config: configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 6.0
      inference time (ms/im):
        - value: 89.29
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 35.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth
  - Name: cascade_mask_rcnn_r50_fpn_20e_coco
    In Collection: Cascade Mask R-CNN
    Config: configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py
    Metadata:
      Training Memory (GB): 6.0
      inference time (ms/im):
        - value: 89.29
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 20
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.9
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth
  - Name: cascade_mask_rcnn_r101_caffe_fpn_1x_coco
    In Collection: Cascade Mask R-CNN
    Config: configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 7.8
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco/cascade_mask_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.432__segm_mAP-0.376_20200504_174813-5c1e9599.pth
  - Name: cascade_mask_rcnn_r101_fpn_1x_coco
    In Collection: Cascade Mask R-CNN
    Config: configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 7.9
      inference time (ms/im):
        - value: 102.04
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.9
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco/cascade_mask_rcnn_r101_fpn_1x_coco_20200203-befdf6ee.pth
  - Name: cascade_mask_rcnn_r101_fpn_20e_coco
    In Collection: Cascade Mask R-CNN
    Config: configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py
    Metadata:
      Training Memory (GB): 7.9
      inference time (ms/im):
        - value: 102.04
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 20
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.4
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco/cascade_mask_rcnn_r101_fpn_20e_coco_bbox_mAP-0.434__segm_mAP-0.378_20200504_174836-005947da.pth
  - Name: cascade_mask_rcnn_x101_32x4d_fpn_1x_coco
    In Collection: Cascade Mask R-CNN
    Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 9.2
      inference time (ms/im):
        - value: 116.28
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.3
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201-0f411b1f.pth
  - Name: cascade_mask_rcnn_x101_32x4d_fpn_20e_coco
    In Collection: Cascade Mask R-CNN
    Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py
    Metadata:
      Training Memory (GB): 9.2
      inference time (ms/im):
        - value: 116.28
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 20
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 45.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917-ed1f4751.pth
  - Name: cascade_mask_rcnn_x101_64x4d_fpn_1x_coco
    In Collection: Cascade Mask R-CNN
    Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 12.2
      inference time (ms/im):
        - value: 149.25
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 45.3
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203-9a2db89d.pth
  - Name: cascade_mask_rcnn_x101_64x4d_fpn_20e_coco
    In Collection: Cascade Mask R-CNN
    Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py
    Metadata:
      Training Memory (GB): 12.2
      Epochs: 20
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 45.6
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033-bdb5126a.pth
  - Name: cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco
    In Collection: Cascade Mask R-CNN
    Config: configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 5.7
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210707_002651-6e29b3a6.pth
  - Name: cascade_mask_rcnn_r50_fpn_mstrain_3x_coco
    In Collection: Cascade Mask R-CNN
    Config: configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 5.9
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.3
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco_20210628_164719-5bdc3824.pth
  - Name: cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco
    In Collection: Cascade Mask R-CNN
    Config: configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 7.7
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 45.4
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210707_002620-a5bd2389.pth
  - Name: cascade_mask_rcnn_r101_fpn_mstrain_3x_coco
    In Collection: Cascade Mask R-CNN
    Config: configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 7.8
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 45.5
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco_20210628_165236-51a2d363.pth
  - Name: cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco
    In Collection: Cascade Mask R-CNN
    Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 9.0
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.3
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 40.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210706_225234-40773067.pth
  - Name: cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco
    In Collection: Cascade Mask R-CNN
    Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 12.1
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210719_180640-9ff7e76f.pth
  - Name: cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco
    In Collection: Cascade Mask R-CNN
    Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 12.0
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.6
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 40.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210719_210311-d3e64ba0.pth
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py
================================================
_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    roi_head=dict(
        bbox_head=dict(
            bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rcnn=dict(
            assigner=dict(
                pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
            sampler=dict(num=256))),
    test_cfg=dict(rcnn=dict(score_thr=1e-3)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadProposals', num_max_proposals=300),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadProposals', num_max_proposals=300),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='ToTensor', keys=['proposals']),
            dict(
                type='ToDataContainer',
                fields=[dict(key='proposals', stack=False)]),
            dict(type='Collect', keys=['img', 'proposals']),
        ])
]
data = dict(
    train=dict(
        proposal_file=data_root +
        'proposals/crpn_r50_caffe_fpn_1x_train2017.pkl',
        pipeline=train_pipeline),
    val=dict(
        proposal_file=data_root +
        'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl',
        pipeline=test_pipeline),
    test=dict(
        proposal_file=data_root +
        'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl',
        pipeline=test_pipeline))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
================================================
FILE: DLTA_AI_app/mmdetection/configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py
================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py'
rpn_weight = 0.7
model = dict(
    rpn_head=dict(
        _delete_=True,
        type='CascadeRPNHead',
        num_stages=2,
        stages=[
            dict(
                type='StageCascadeRPNHead',
                in_channels=256,
                feat_channels=256,
                anchor_generator=dict(
                    type='AnchorGenerator',
                    scales=[8],
                    ratios=[1.0],
                    strides=[4, 8, 16, 32, 64]),
                adapt_cfg=dict(type='dilation', dilation=3),
                bridged_feature=True,
                sampling=False,
                with_cls=False,
                reg_decoded_bbox=True,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=(.0, .0, .0, .0),
                    target_stds=(0.1, 0.1, 0.5, 0.5)),
                loss_bbox=dict(
                    type='IoULoss', linear=True,
                    loss_weight=10.0 * rpn_weight)),
            dict(
                type='StageCascadeRPNHead',
                in_channels=256,
                feat_channels=256,
                adapt_cfg=dict(type='offset'),
                bridged_feature=False,
                sampling=True,
                with_cls=True,
                reg_decoded_bbox=True,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=(.0, .0, .0, .0),
                    target_stds=(0.05, 0.05, 0.1, 0.1)),
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=True,
                    loss_weight=1.0 * rpn_weight),
                loss_bbox=dict(
                    type='IoULoss', linear=True,
                    loss_weight=10.0 * rpn_weight))
        ]),
    roi_head=dict(
        bbox_head=dict(
            bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=[
            dict(
                assigner=dict(
                    type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5),
                allowed_border=-1,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.7,
                    min_pos_iou=0.3,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=256,
                    pos_fraction=0.5,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=False),
                allowed_border=-1,
                pos_weight=-1,
                debug=False)
        ],
        rpn_proposal=dict(max_per_img=300, nms=dict(iou_threshold=0.8)),
        rcnn=dict(
            assigner=dict(
                pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
sampler=dict(type='RandomSampler', num=256))), test_cfg=dict( rpn=dict(max_per_img=300, nms=dict(iou_threshold=0.8)), rcnn=dict(score_thr=1e-3))) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py ================================================ _base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py' model = dict( rpn_head=dict( _delete_=True, type='CascadeRPNHead', num_stages=2, stages=[ dict( type='StageCascadeRPNHead', in_channels=256, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', scales=[8], ratios=[1.0], strides=[4, 8, 16, 32, 64]), adapt_cfg=dict(type='dilation', dilation=3), bridged_feature=True, sampling=False, with_cls=False, reg_decoded_bbox=True, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=(.0, .0, .0, .0), target_stds=(0.1, 0.1, 0.5, 0.5)), loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0)), dict( type='StageCascadeRPNHead', in_channels=256, feat_channels=256, adapt_cfg=dict(type='offset'), bridged_feature=False, sampling=True, with_cls=True, reg_decoded_bbox=True, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=(.0, .0, .0, .0), target_stds=(0.05, 0.05, 0.1, 0.1)), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0)) ]), train_cfg=dict(rpn=[ dict( assigner=dict( type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5), allowed_border=-1, pos_weight=-1, debug=False), dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.7, min_pos_iou=0.3, ignore_iof_thr=-1, iou_calculator=dict(type='BboxOverlaps2D')), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=-1, pos_weight=-1, debug=False) ]), test_cfg=dict( rpn=dict( nms_pre=2000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.8), min_bbox_size=0))) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/cascade_rpn/metafile.yml ================================================ Collections: - Name: Cascade RPN Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - Cascade RPN - FPN - ResNet Paper: URL: https://arxiv.org/abs/1909.06720 Title: 'Cascade RPN: Delving into High-Quality Region Proposal Network with Adaptive Convolution' README: configs/cascade_rpn/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.8.0/mmdet/models/dense_heads/cascade_rpn_head.py#L538 Version: v2.8.0 Models: - Name: crpn_fast_rcnn_r50_caffe_fpn_1x_coco In Collection: Cascade RPN Config: configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 39.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco/crpn_fast_rcnn_r50_caffe_fpn_1x_coco-cb486e66.pth - Name: crpn_faster_rcnn_r50_caffe_fpn_1x_coco In Collection: Cascade RPN Config: configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.4 Weights: 
https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/centernet/centernet_resnet18_140e_coco.py ================================================ _base_ = './centernet_resnet18_dcnv2_140e_coco.py' model = dict(neck=dict(use_dcn=False)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/centernet/centernet_resnet18_dcnv2_140e_coco.py ================================================ _base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='CenterNet', backbone=dict( type='ResNet', depth=18, norm_eval=False, norm_cfg=dict(type='BN'), init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), neck=dict( type='CTResNetNeck', in_channel=512, num_deconv_filters=(256, 128, 64), num_deconv_kernels=(4, 4, 4), use_dcn=True), bbox_head=dict( type='CenterNetHead', num_classes=80, in_channel=64, feat_channel=64, loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0), loss_wh=dict(type='L1Loss', loss_weight=0.1), loss_offset=dict(type='L1Loss', loss_weight=1.0)), train_cfg=None, test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100)) # We fixed the incorrect img_norm_cfg problem in the source code. img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile', to_float32=True, color_type='color'), dict(type='LoadAnnotations', with_bbox=True), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict( type='RandomCenterCropPad', crop_size=(512, 512), ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), mean=[0, 0, 0], std=[1, 1, 1], to_rgb=True, test_pad_mode=None), dict(type='Resize', img_scale=(512, 512), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] test_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict( type='MultiScaleFlipAug', scale_factor=1.0, flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict( type='RandomCenterCropPad', ratios=None, border=None, mean=[0, 0, 0], std=[1, 1, 1], to_rgb=True, test_mode=True, test_pad_mode=['logical_or', 31], test_pad_add_pix=1), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict( type='Collect', meta_keys=('filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip', 'flip_direction', 'img_norm_cfg', 'border'), keys=['img']) ]) ] dataset_type = 'CocoDataset' data_root = 'data/coco/' # Use RepeatDataset to speed up training data = dict( samples_per_gpu=16, workers_per_gpu=4, train=dict( _delete_=True, type='RepeatDataset', times=5, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer # Based on the default settings of modern detectors, the SGD effect is better # than the Adam in the source code, so we use SGD default settings and # if you use adam+lr5e-4, the map is 29.1. 
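Since the data dict above wraps COCO in RepeatDataset(times=5), every runner
"epoch" below covers train2017 five times. A standalone sanity check of the
"real step"/"real epoch" comments that follow (run separately, not as part of
the config):

_times = 5                                   # RepeatDataset factor from above
assert 28 * _times == 140                    # runner max_epochs -> 140 epochs
assert [s * _times for s in (18, 24)] == [90, 120]   # effective lr steps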
optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) # learning policy # Based on the default settings of modern detectors, we added warmup settings. lr_config = dict( policy='step', warmup='linear', warmup_iters=1000, warmup_ratio=1.0 / 1000, step=[18, 24]) # the real step is [18*5, 24*5] runner = dict(max_epochs=28) # the real epoch is 28*5=140 # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (16 samples per GPU) auto_scale_lr = dict(base_batch_size=128) ================================================ FILE: DLTA_AI_app/mmdetection/configs/centernet/metafile.yml ================================================ Collections: - Name: CenterNet Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x TITANXP GPUs Architecture: - ResNet Paper: URL: https://arxiv.org/abs/1904.07850 Title: 'Objects as Points' README: configs/centernet/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.13.0/mmdet/models/detectors/centernet.py#L10 Version: v2.13.0 Models: - Name: centernet_resnet18_dcnv2_140e_coco In Collection: CenterNet Config: configs/centernet/centernet_resnet18_dcnv2_140e_coco.py Metadata: Batch Size: 128 Training Memory (GB): 3.47 Epochs: 140 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 29.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_dcnv2_140e_coco/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth - Name: centernet_resnet18_140e_coco In Collection: CenterNet Config: configs/centernet/centernet_resnet18_140e_coco.py Metadata: Batch Size: 128 Training Memory (GB): 3.45 Epochs: 140 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 25.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_140e_coco/centernet_resnet18_140e_coco_20210705_093630-bb5b3bf7.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py ================================================ _base_ = [ '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' ] # model settings model = dict( type='CornerNet', backbone=dict( type='HourglassNet', downsample_times=5, num_stacks=2, stage_channels=[256, 256, 384, 384, 384, 512], stage_blocks=[2, 2, 2, 2, 2, 4], norm_cfg=dict(type='BN', requires_grad=True)), neck=None, bbox_head=dict( type='CentripetalHead', num_classes=80, in_channels=256, num_feat_levels=2, corner_emb_channels=0, loss_heatmap=dict( type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1), loss_guiding_shift=dict( type='SmoothL1Loss', beta=1.0, loss_weight=0.05), loss_centripetal_shift=dict( type='SmoothL1Loss', beta=1.0, loss_weight=1)), # training and testing settings train_cfg=None, test_cfg=dict( corner_topk=100, local_maximum_kernel=3, distance_threshold=0.5, score_thr=0.05, max_per_img=100, nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'))) # data settings img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict(type='LoadAnnotations', with_bbox=True), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict( type='RandomCenterCropPad', 
crop_size=(511, 511), ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), test_mode=False, test_pad_mode=None, **img_norm_cfg), dict(type='Resize', img_scale=(511, 511), keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict( type='MultiScaleFlipAug', scale_factor=1.0, flip=True, transforms=[ dict(type='Resize'), dict( type='RandomCenterCropPad', crop_size=None, ratios=None, border=None, test_mode=True, test_pad_mode=['logical_or', 127], **img_norm_cfg), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict( type='Collect', keys=['img'], meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip', 'img_norm_cfg', 'border')), ]) ] data = dict( samples_per_gpu=6, workers_per_gpu=3, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict(type='Adam', lr=0.0005) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 3, step=[190]) runner = dict(type='EpochBasedRunner', max_epochs=210) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (16 GPUs) x (6 samples per GPU) auto_scale_lr = dict(base_batch_size=96) ================================================ FILE: DLTA_AI_app/mmdetection/configs/centripetalnet/metafile.yml ================================================ Collections: - Name: CentripetalNet Metadata: Training Data: COCO Training Techniques: - Adam Training Resources: 16x V100 GPUs Architecture: - Corner Pooling - Stacked Hourglass Network Paper: URL: https://arxiv.org/abs/2003.09119 Title: 'CentripetalNet: Pursuing High-quality Keypoint Pairs for Object Detection' README: configs/centripetalnet/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.5.0/mmdet/models/detectors/cornernet.py#L9 Version: v2.5.0 Models: - Name: centripetalnet_hourglass104_mstest_16x6_210e_coco In Collection: CentripetalNet Config: configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py Metadata: Batch Size: 96 Training Memory (GB): 16.7 inference time (ms/im): - value: 270.27 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 210 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 44.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py ================================================ _base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/cityscapes_detection.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict(init_cfg=None), roi_head=dict( bbox_head=dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=8, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), 
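# (Annotation.) num_classes drops from COCO's 80 to 8 in this head: the
# Cityscapes instance benchmark covers person, rider, car, truck, bus, train,
# motorcycle and bicycle, while everything else is left intact so the COCO
# checkpoint referenced in load_from below can initialize the model.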
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))) # optimizer # lr is set for a batch size of 8 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=None) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, # [7] yields higher performance than [6] step=[7]) runner = dict( type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64 log_config = dict(interval=100) # For better, more stable performance initialize from COCO load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (1 samples per GPU) auto_scale_lr = dict(base_batch_size=8) ================================================ FILE: DLTA_AI_app/mmdetection/configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict(init_cfg=None), roi_head=dict( bbox_head=dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=8, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), mask_head=dict( type='FCNMaskHead', num_convs=4, in_channels=256, conv_out_channels=256, num_classes=8, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)))) # optimizer # lr is set for a batch size of 8 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=None) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, # [7] yields higher performance than [6] step=[7]) runner = dict( type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64 log_config = dict(interval=100) # For better, more stable performance initialize from COCO load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth' # noqa # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. 
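The auto_scale_lr block only takes effect when training is launched with the
--auto-scale-lr flag, in which case mmdetection rescales the optimizer lr
linearly by the ratio of the actual to the base batch size. A minimal sketch
of that rule, assuming the mmdet 2.x behaviour:

def scale_lr(base_lr, num_gpus, samples_per_gpu, base_batch_size):
    # linear scaling rule: lr follows the effective batch size
    return base_lr * (num_gpus * samples_per_gpu) / base_batch_size

# e.g. this config on 4 GPUs x 1 sample: 0.01 * 4 / 8 = 0.005
assert scale_lr(0.01, 4, 1, 8) == 0.005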
# base_batch_size = (8 GPUs) x (1 samples per GPU) auto_scale_lr = dict(base_batch_size=8) ================================================ FILE: DLTA_AI_app/mmdetection/configs/common/lsj_100e_coco_instance.py ================================================ _base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) image_size = (1024, 1024) file_client_args = dict(backend='disk') # comment out the code below to use different file client # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) train_pipeline = [ dict(type='LoadImageFromFile', file_client_args=file_client_args), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=image_size, ratio_range=(0.1, 2.0), multiscale_mode='range', keep_ratio=True), dict( type='RandomCrop', crop_type='absolute_range', crop_size=image_size, recompute_bbox=True, allow_negative_crop=True), dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size=image_size), # padding to image_size leads 0.5+ mAP dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile', file_client_args=file_client_args), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] # Use RepeatDataset to speed up training data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type='RepeatDataset', times=4, # simply change this from 2 to 16 for 50e - 400e training. 
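# (Annotation.) The schedule at the bottom of this file runs 25 runner epochs,
# so the effective pass count over COCO is times * 25:
#   times=4  -> 100 epochs (this file, "lsj_100e")
#   times=2  ->  50 epochs; times=16 -> 400 epochs
# and the lr steps [22, 24] land at real epochs [88, 96] out of 100.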
dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline)), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) evaluation = dict(interval=5, metric=['bbox', 'segm']) # optimizer assumes bs=64 optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004) optimizer_config = dict(grad_clip=None) lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.067, step=[22, 24]) runner = dict(type='EpochBasedRunner', max_epochs=25) ================================================ FILE: DLTA_AI_app/mmdetection/configs/common/mstrain-poly_3x_coco_instance.py ================================================ _base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) # In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], # multiscale_mode='range' train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='LoadAnnotations', with_bbox=True, with_mask=True, poly2mask=False), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] # Use RepeatDataset to speed up training data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type='RepeatDataset', times=3, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline)), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric=['bbox', 'segm']) # optimizer optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=None) # learning policy # Experiments show that using step=[9, 11] has higher performance lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, step=[9, 11]) runner = dict(type='EpochBasedRunner', max_epochs=12) ================================================ FILE: DLTA_AI_app/mmdetection/configs/common/mstrain_3x_coco.py ================================================ _base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) # In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], # 
multiscale_mode='range' train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] # Use RepeatDataset to speed up training data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type='RepeatDataset', times=3, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline)), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric='bbox') # optimizer optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=None) # learning policy # Experiments show that using step=[9, 11] has higher performance lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, step=[9, 11]) runner = dict(type='EpochBasedRunner', max_epochs=12) ================================================ FILE: DLTA_AI_app/mmdetection/configs/common/mstrain_3x_coco_instance.py ================================================ _base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) # In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], # multiscale_mode='range' train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] # Use RepeatDataset to speed up training data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type='RepeatDataset', times=3, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline)), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 
'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric=['bbox', 'segm']) # optimizer optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=None) # learning policy # Experiments show that using step=[9, 11] has higher performance lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, step=[9, 11]) runner = dict(type='EpochBasedRunner', max_epochs=12) ================================================ FILE: DLTA_AI_app/mmdetection/configs/common/ssj_270k_coco_instance.py ================================================ _base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) image_size = (1024, 1024) file_client_args = dict(backend='disk') # Standard Scale Jittering (SSJ) resizes and crops an image # with a resize range of 0.8 to 1.25 of the original image size. train_pipeline = [ dict(type='LoadImageFromFile', file_client_args=file_client_args), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=image_size, ratio_range=(0.8, 1.25), multiscale_mode='range', keep_ratio=True), dict( type='RandomCrop', crop_type='absolute_range', crop_size=image_size, recompute_bbox=True, allow_negative_crop=True), dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size=image_size), # padding to image_size leads 0.5+ mAP dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile', file_client_args=file_client_args), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) evaluation = dict(interval=6000, metric=['bbox', 'segm']) # optimizer assumes batch_size = (32 GPUs) x (2 samples per GPU) optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004) optimizer_config = dict(grad_clip=None) # lr steps at [0.9, 0.95, 0.975] of the maximum iterations lr_config = dict( policy='step', warmup='linear', warmup_iters=1000, warmup_ratio=0.001, step=[243000, 256500, 263250]) checkpoint_config = dict(interval=6000) # The model is trained by 270k iterations with batch_size 64, # which is roughly equivalent to 144 epochs. runner = dict(type='IterBasedRunner', max_iters=270000) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. 
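For the iteration-based runner above, the epoch equivalence and the placement
of the lr steps can be checked directly (a standalone sketch; 118,287 is the
image count of COCO train2017):

_max_iters, _batch = 270000, 64
assert [round(_max_iters * f) for f in (0.9, 0.95, 0.975)] == \
    [243000, 256500, 263250]                 # the step list used above
print(_max_iters * _batch / 118287)          # ~146 passes, i.e. roughly the
                                             # ~144 epochs quoted in the comment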
# base_batch_size = (32 GPUs) x (2 samples per GPU) auto_scale_lr = dict(base_batch_size=64) ================================================ FILE: DLTA_AI_app/mmdetection/configs/common/ssj_scp_270k_coco_instance.py ================================================ _base_ = '../_base_/default_runtime.py' # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) image_size = (1024, 1024) file_client_args = dict(backend='disk') # Standard Scale Jittering (SSJ) resizes and crops an image # with a resize range of 0.8 to 1.25 of the original image size. load_pipeline = [ dict(type='LoadImageFromFile', file_client_args=file_client_args), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=image_size, ratio_range=(0.8, 1.25), multiscale_mode='range', keep_ratio=True), dict( type='RandomCrop', crop_type='absolute_range', crop_size=image_size, recompute_bbox=True, allow_negative_crop=True), dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Pad', size=image_size), ] train_pipeline = [ dict(type='CopyPaste', max_num_pasted=100), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile', file_client_args=file_client_args), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type='MultiImageMixDataset', dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=load_pipeline), pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) evaluation = dict(interval=6000, metric=['bbox', 'segm']) # optimizer assumes batch_size = (32 GPUs) x (2 samples per GPU) optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004) optimizer_config = dict(grad_clip=None) # lr steps at [0.9, 0.95, 0.975] of the maximum iterations lr_config = dict( policy='step', warmup='linear', warmup_iters=1000, warmup_ratio=0.001, step=[243000, 256500, 263250]) checkpoint_config = dict(interval=6000) # The model is trained by 270k iterations with batch_size 64, # which is roughly equivalent to 144 epochs. runner = dict(type='IterBasedRunner', max_iters=270000) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. 
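The only change relative to ssj_270k_coco_instance.py is the Copy-Paste
wrapper: MultiImageMixDataset runs load_pipeline on each sample individually,
then hands extra samples to the mix-style transforms in train_pipeline. A
rough sketch of that control flow (the mix_results key and the helper below
are assumptions of this sketch, not mmdet internals):

import random

def mix_sample(records, load_pipeline, train_pipeline):
    # load + geometric augmentation for the main image and one random partner
    main, partner = (load_pipeline(dict(r)) for r in random.sample(records, 2))
    main['mix_results'] = [partner]   # CopyPaste pastes partner instances here
    return train_pipeline(main)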
# base_batch_size = (32 GPUs) x (2 samples per GPU) auto_scale_lr = dict(base_batch_size=64) ================================================ FILE: DLTA_AI_app/mmdetection/configs/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py ================================================ _base_ = './cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py' # noqa # please install mmcls>=0.22.0 # import mmcls.models to trigger register_module in mmcls custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth' # noqa model = dict( backbone=dict( _delete_=True, type='mmcls.ConvNeXt', arch='small', out_indices=[0, 1, 2, 3], drop_path_rate=0.6, layer_scale_init_value=1.0, gap_before_final_norm=False, init_cfg=dict( type='Pretrained', checkpoint=checkpoint_file, prefix='backbone.'))) optimizer = dict( _delete_=True, constructor='LearningRateDecayOptimizerConstructor', type='AdamW', lr=0.0002, betas=(0.9, 0.999), weight_decay=0.05, paramwise_cfg={ 'decay_rate': 0.7, 'decay_type': 'layer_wise', 'num_layers': 12 }) ================================================ FILE: DLTA_AI_app/mmdetection/configs/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py ================================================ _base_ = [ '../_base_/models/cascade_mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # please install mmcls>=0.22.0 # import mmcls.models to trigger register_module in mmcls custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa model = dict( backbone=dict( _delete_=True, type='mmcls.ConvNeXt', arch='tiny', out_indices=[0, 1, 2, 3], drop_path_rate=0.4, layer_scale_init_value=1.0, gap_before_final_norm=False, init_cfg=dict( type='Pretrained', checkpoint=checkpoint_file, prefix='backbone.')), neck=dict(in_channels=[96, 192, 384, 768]), roi_head=dict(bbox_head=[ dict( type='ConvFCBBoxHead', num_shared_convs=4, num_shared_fcs=1, in_channels=256, conv_out_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, reg_decoded_bbox=True, norm_cfg=dict(type='SyncBN', requires_grad=True), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), dict( type='ConvFCBBoxHead', num_shared_convs=4, num_shared_fcs=1, in_channels=256, conv_out_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.05, 0.05, 0.1, 0.1]), reg_class_agnostic=False, reg_decoded_bbox=True, norm_cfg=dict(type='SyncBN', requires_grad=True), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), dict( type='ConvFCBBoxHead', num_shared_convs=4, num_shared_fcs=1, in_channels=256, conv_out_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.033, 
0.033, 0.067, 0.067]), reg_class_agnostic=False, reg_decoded_bbox=True, norm_cfg=dict(type='SyncBN', requires_grad=True), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=10.0)) ])) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) # augmentation strategy originates from DETR / Sparse RCNN train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='RandomFlip', flip_ratio=0.5), dict( type='AutoAugment', policies=[[ dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', keep_ratio=True) ], [ dict( type='Resize', img_scale=[(400, 1333), (500, 1333), (600, 1333)], multiscale_mode='value', keep_ratio=True), dict( type='RandomCrop', crop_type='absolute_range', crop_size=(384, 600), allow_negative_crop=True), dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', override=True, keep_ratio=True) ]]), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] data = dict(train=dict(pipeline=train_pipeline), persistent_workers=True) optimizer = dict( _delete_=True, constructor='LearningRateDecayOptimizerConstructor', type='AdamW', lr=0.0002, betas=(0.9, 0.999), weight_decay=0.05, paramwise_cfg={ 'decay_rate': 0.7, 'decay_type': 'layer_wise', 'num_layers': 6 }) lr_config = dict(warmup_iters=1000, step=[27, 33]) runner = dict(max_epochs=36) # you need to set mode='dynamic' if you are using pytorch<=1.5.0 fp16 = dict(loss_scale=dict(init_scale=512)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # please install mmcls>=0.22.0 # import mmcls.models to trigger register_module in mmcls custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa model = dict( backbone=dict( _delete_=True, type='mmcls.ConvNeXt', arch='tiny', out_indices=[0, 1, 2, 3], drop_path_rate=0.4, layer_scale_init_value=1.0, gap_before_final_norm=False, init_cfg=dict( type='Pretrained', checkpoint=checkpoint_file, prefix='backbone.')), neck=dict(in_channels=[96, 192, 384, 768])) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) # augmentation strategy originates from DETR / Sparse RCNN train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='RandomFlip', flip_ratio=0.5), dict( type='AutoAugment', policies=[[ dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', keep_ratio=True) ], [ dict( type='Resize', 
img_scale=[(400, 1333), (500, 1333), (600, 1333)], multiscale_mode='value', keep_ratio=True), dict( type='RandomCrop', crop_type='absolute_range', crop_size=(384, 600), allow_negative_crop=True), dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', override=True, keep_ratio=True) ]]), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] data = dict(train=dict(pipeline=train_pipeline), persistent_workers=True) optimizer = dict( _delete_=True, constructor='LearningRateDecayOptimizerConstructor', type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, paramwise_cfg={ 'decay_rate': 0.95, 'decay_type': 'layer_wise', 'num_layers': 6 }) lr_config = dict(warmup_iters=1000, step=[27, 33]) runner = dict(max_epochs=36) # you need to set mode='dynamic' if you are using pytorch<=1.5.0 fp16 = dict(loss_scale=dict(init_scale=512)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/convnext/metafile.yml ================================================ Models: - Name: mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco In Collection: Mask R-CNN Config: configs/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco.py Metadata: Training Memory (GB): 7.3 Epochs: 36 Training Data: COCO Training Techniques: - AdamW - Mixed Precision Training Training Resources: 8x A100 GPUs Architecture: - ConvNeXt Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.2 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 41.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco_20220426_154953-050731f4.pth Paper: URL: https://arxiv.org/abs/2201.03545 Title: 'A ConvNet for the 2020s' README: configs/convnext/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 Version: v2.16.0 - Name: cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco In Collection: Cascade Mask R-CNN Config: configs/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py Metadata: Training Memory (GB): 9.0 Epochs: 36 Training Data: COCO Training Techniques: - AdamW - Mixed Precision Training Training Resources: 8x A100 GPUs Architecture: - ConvNeXt Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 50.3 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 43.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220509_204200-8f07c40b.pth Paper: URL: https://arxiv.org/abs/2201.03545 Title: 'A ConvNet for the 2020s' README: configs/convnext/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 Version: v2.25.0 - Name: cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco In Collection: Cascade Mask R-CNN Config: configs/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py Metadata: Training Memory (GB): 12.3 Epochs: 36 Training Data: COCO Training Techniques: - AdamW - Mixed Precision Training Training 
Resources: 8x A100 GPUs Architecture: - ConvNeXt Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 51.8 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 44.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004-3d24f5a4.pth Paper: URL: https://arxiv.org/abs/2201.03545 Title: 'A ConvNet for the 2020s' README: configs/convnext/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 Version: v2.25.0 ================================================ FILE: DLTA_AI_app/mmdetection/configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py ================================================ _base_ = [ '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' ] # model settings model = dict( type='CornerNet', backbone=dict( type='HourglassNet', downsample_times=5, num_stacks=2, stage_channels=[256, 256, 384, 384, 384, 512], stage_blocks=[2, 2, 2, 2, 2, 4], norm_cfg=dict(type='BN', requires_grad=True)), neck=None, bbox_head=dict( type='CornerHead', num_classes=80, in_channels=256, num_feat_levels=2, corner_emb_channels=1, loss_heatmap=dict( type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), loss_embedding=dict( type='AssociativeEmbeddingLoss', pull_weight=0.10, push_weight=0.10), loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)), # training and testing settings train_cfg=None, test_cfg=dict( corner_topk=100, local_maximum_kernel=3, distance_threshold=0.5, score_thr=0.05, max_per_img=100, nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'))) # data settings img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict(type='LoadAnnotations', with_bbox=True), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict( type='RandomCenterCropPad', crop_size=(511, 511), ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), test_mode=False, test_pad_mode=None, **img_norm_cfg), dict(type='Resize', img_scale=(511, 511), keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict( type='MultiScaleFlipAug', scale_factor=1.0, flip=True, transforms=[ dict(type='Resize'), dict( type='RandomCenterCropPad', crop_size=None, ratios=None, border=None, test_mode=True, test_pad_mode=['logical_or', 127], **img_norm_cfg), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict( type='Collect', keys=['img'], meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip', 'img_norm_cfg', 'border')), ]) ] data = dict( samples_per_gpu=5, workers_per_gpu=3, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict(type='Adam', lr=0.0005) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 3, step=[180]) runner = dict(type='EpochBasedRunner', max_epochs=210) # NOTE: 
`auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (10 GPUs) x (5 samples per GPU) auto_scale_lr = dict(base_batch_size=50) ================================================ FILE: DLTA_AI_app/mmdetection/configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py ================================================ _base_ = [ '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' ] # model settings model = dict( type='CornerNet', backbone=dict( type='HourglassNet', downsample_times=5, num_stacks=2, stage_channels=[256, 256, 384, 384, 384, 512], stage_blocks=[2, 2, 2, 2, 2, 4], norm_cfg=dict(type='BN', requires_grad=True)), neck=None, bbox_head=dict( type='CornerHead', num_classes=80, in_channels=256, num_feat_levels=2, corner_emb_channels=1, loss_heatmap=dict( type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), loss_embedding=dict( type='AssociativeEmbeddingLoss', pull_weight=0.10, push_weight=0.10), loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)), # training and testing settings train_cfg=None, test_cfg=dict( corner_topk=100, local_maximum_kernel=3, distance_threshold=0.5, score_thr=0.05, max_per_img=100, nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'))) # data settings img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict(type='LoadAnnotations', with_bbox=True), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict( type='RandomCenterCropPad', crop_size=(511, 511), ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), test_mode=False, test_pad_mode=None, **img_norm_cfg), dict(type='Resize', img_scale=(511, 511), keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict( type='MultiScaleFlipAug', scale_factor=1.0, flip=True, transforms=[ dict(type='Resize'), dict( type='RandomCenterCropPad', crop_size=None, ratios=None, border=None, test_mode=True, test_pad_mode=['logical_or', 127], **img_norm_cfg), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict( type='Collect', keys=['img'], meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip', 'img_norm_cfg', 'border')), ]) ] data = dict( samples_per_gpu=3, workers_per_gpu=3, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict(type='Adam', lr=0.0005) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 3, step=[180]) runner = dict(type='EpochBasedRunner', max_epochs=210) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. 
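All three cornernet configs post-process with Gaussian soft-NMS rather than
hard suppression, so overlapping detections keep a decayed score instead of
being discarded outright. A minimal NumPy sketch of that decay rule (the
shipped implementation is mmcv's soft_nms; this is only illustrative):

import numpy as np

def soft_nms_gaussian(boxes, scores, sigma=0.5, score_thr=0.05):
    # boxes: (N, 4) as x1, y1, x2, y2; scores: (N,)
    scores = scores.copy()
    keep = []
    while scores.max(initial=0.0) > score_thr:
        i = int(scores.argmax())
        keep.append(i)
        x1 = np.maximum(boxes[i, 0], boxes[:, 0])
        y1 = np.maximum(boxes[i, 1], boxes[:, 1])
        x2 = np.minimum(boxes[i, 2], boxes[:, 2])
        y2 = np.minimum(boxes[i, 3], boxes[:, 3])
        inter = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
        area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        iou = inter / (area[i] + area - inter)
        scores *= np.exp(-(iou ** 2) / sigma)  # strongest decay at high IoU
        scores[i] = 0.0                        # never revisit a kept box
    return keep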
# base_batch_size = (32 GPUs) x (3 samples per GPU) auto_scale_lr = dict(base_batch_size=96) ================================================ FILE: DLTA_AI_app/mmdetection/configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py ================================================ _base_ = [ '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' ] # model settings model = dict( type='CornerNet', backbone=dict( type='HourglassNet', downsample_times=5, num_stacks=2, stage_channels=[256, 256, 384, 384, 384, 512], stage_blocks=[2, 2, 2, 2, 2, 4], norm_cfg=dict(type='BN', requires_grad=True)), neck=None, bbox_head=dict( type='CornerHead', num_classes=80, in_channels=256, num_feat_levels=2, corner_emb_channels=1, loss_heatmap=dict( type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), loss_embedding=dict( type='AssociativeEmbeddingLoss', pull_weight=0.10, push_weight=0.10), loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)), # training and testing settings train_cfg=None, test_cfg=dict( corner_topk=100, local_maximum_kernel=3, distance_threshold=0.5, score_thr=0.05, max_per_img=100, nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'))) # data settings img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict(type='LoadAnnotations', with_bbox=True), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict( type='RandomCenterCropPad', crop_size=(511, 511), ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), test_mode=False, test_pad_mode=None, **img_norm_cfg), dict(type='Resize', img_scale=(511, 511), keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict( type='MultiScaleFlipAug', scale_factor=1.0, flip=True, transforms=[ dict(type='Resize'), dict( type='RandomCenterCropPad', crop_size=None, ratios=None, border=None, test_mode=True, test_pad_mode=['logical_or', 127], **img_norm_cfg), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict( type='Collect', keys=['img'], meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip', 'img_norm_cfg', 'border')), ]) ] data = dict( samples_per_gpu=6, workers_per_gpu=3, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict(type='Adam', lr=0.0005) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 3, step=[180]) runner = dict(type='EpochBasedRunner', max_epochs=210) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. 
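# (Annotation.) The three cornernet configs in this directory are otherwise
# identical and differ only in batch geometry -- 10 GPUs x 5, 32 GPUs x 3 and
# 8 GPUs x 6 images -- which is exactly what their base_batch_size values
# (50, 96, 48) encode.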
# base_batch_size = (8 GPUs) x (6 samples per GPU) auto_scale_lr = dict(base_batch_size=48) ================================================ FILE: DLTA_AI_app/mmdetection/configs/cornernet/metafile.yml ================================================ Collections: - Name: CornerNet Metadata: Training Data: COCO Training Techniques: - Adam Training Resources: 8x V100 GPUs Architecture: - Corner Pooling - Stacked Hourglass Network Paper: URL: https://arxiv.org/abs/1808.01244 Title: 'CornerNet: Detecting Objects as Paired Keypoints' README: configs/cornernet/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.3.0/mmdet/models/detectors/cornernet.py#L9 Version: v2.3.0 Models: - Name: cornernet_hourglass104_mstest_10x5_210e_coco In Collection: CornerNet Config: configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py Metadata: Training Resources: 10x V100 GPUs Batch Size: 50 Training Memory (GB): 13.9 inference time (ms/im): - value: 238.1 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 210 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco/cornernet_hourglass104_mstest_10x5_210e_coco_20200824_185720-5fefbf1c.pth - Name: cornernet_hourglass104_mstest_8x6_210e_coco In Collection: CornerNet Config: configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py Metadata: Batch Size: 48 Training Memory (GB): 15.9 inference time (ms/im): - value: 238.1 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 210 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth - Name: cornernet_hourglass104_mstest_32x3_210e_coco In Collection: CornerNet Config: configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py Metadata: Training Resources: 32x V100 GPUs Batch Size: 96 Training Memory (GB): 9.5 inference time (ms/im): - value: 256.41 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 210 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco/cornernet_hourglass104_mstest_32x3_210e_coco_20200819_203110-1efaea91.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py ================================================ _base_ = '../cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py ================================================ _base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py ================================================ _base_ = 
'../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
================================================ FILE: DLTA_AI_app/mmdetection/configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py ================================================
_base_ = '../cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
================================================ FILE: DLTA_AI_app/mmdetection/configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py ================================================
_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
================================================ FILE: DLTA_AI_app/mmdetection/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py ================================================
_base_ = '../faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
================================================ FILE: DLTA_AI_app/mmdetection/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py ================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
================================================ FILE: DLTA_AI_app/mmdetection/configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py ================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(
                _delete_=True,
                type='DeformRoIPoolPack',
                output_size=7,
                output_channels=256),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32])))
================================================ FILE: DLTA_AI_app/mmdetection/configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py ================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
================================================ FILE: DLTA_AI_app/mmdetection/configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py ================================================
_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
================================================ FILE: DLTA_AI_app/mmdetection/configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py ================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True,
True))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/dcn/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py ================================================ _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True))) fp16 = dict(loss_scale=512.) ================================================ FILE: DLTA_AI_app/mmdetection/configs/dcn/metafile.yml ================================================ Collections: - Name: Deformable Convolutional Networks Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - Deformable Convolution Paper: URL: https://arxiv.org/abs/1703.06211 Title: "Deformable Convolutional Networks" README: configs/dcn/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/dcn/deform_conv.py#L15 Version: v2.0.0 Models: - Name: faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco In Collection: Deformable Convolutional Networks Config: configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py Metadata: Training Memory (GB): 4.0 inference time (ms/im): - value: 56.18 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth - Name: faster_rcnn_r50_fpn_dpool_1x_coco In Collection: Deformable Convolutional Networks Config: configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py Metadata: Training Memory (GB): 5.0 inference time (ms/im): - value: 58.14 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 38.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dpool_1x_coco/faster_rcnn_r50_fpn_dpool_1x_coco_20200307-90d3c01d.pth - Name: faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco In Collection: Deformable Convolutional Networks Config: configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py Metadata: Training Memory (GB): 6.0 inference time (ms/im): - value: 80 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-1377f13d.pth - Name: faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco In Collection: Deformable Convolutional Networks Config: configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py Metadata: Training Memory (GB): 7.3 inference time (ms/im): - value: 100 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 44.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco_20200203-4f85c69c.pth - Name: mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco In Collection: Deformable Convolutional Networks Config: configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py Metadata: Training Memory (GB): 4.5 inference time (ms/im): - value: 64.94 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 
resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.8 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 37.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200203-4d9ad43b.pth - Name: mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco In Collection: Deformable Convolutional Networks Config: configs/dcn/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py Metadata: Training Techniques: - SGD with Momentum - Weight Decay - Mixed Precision Training Training Memory (GB): 3.0 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.9 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 37.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco_20210520_180247-c06429d2.pth - Name: mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco In Collection: Deformable Convolutional Networks Config: configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py Metadata: Training Memory (GB): 6.5 inference time (ms/im): - value: 85.47 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 43.5 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 38.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200216-a71f5bce.pth - Name: cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco In Collection: Deformable Convolutional Networks Config: configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py Metadata: Training Memory (GB): 4.5 inference time (ms/im): - value: 68.49 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 43.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-2f1fca44.pth - Name: cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco In Collection: Deformable Convolutional Networks Config: configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py Metadata: Training Memory (GB): 6.4 inference time (ms/im): - value: 90.91 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 45.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-3b2f0594.pth - Name: cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco In Collection: Deformable Convolutional Networks Config: configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py Metadata: Training Memory (GB): 6.0 inference time (ms/im): - value: 100 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 44.4 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 38.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200202-42e767a2.pth - Name: cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco In Collection: Deformable Convolutional Networks Config: configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py Metadata: Training Memory (GB): 8.0 
inference time (ms/im): - value: 116.28 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 45.8 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 39.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200204-df0c5f10.pth - Name: cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco In Collection: Deformable Convolutional Networks Config: configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py Metadata: Training Memory (GB): 9.2 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 47.3 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 41.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-e75f90c8.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py ================================================ _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py ================================================ _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCNv2', deform_groups=4, fallback_on_stride=False), stage_with_dcn=(False, True, True, True))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/dcnv2/faster_rcnn_r50_fpn_mdpool_1x_coco.py ================================================ _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( roi_head=dict( bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict( _delete_=True, type='ModulatedDeformRoIPoolPack', output_size=7, output_channels=256), out_channels=256, featmap_strides=[4, 8, 16, 32]))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/dcnv2/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py ================================================ _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True))) fp16 = dict(loss_scale=512.) 
================================================ FILE: DLTA_AI_app/mmdetection/configs/dcnv2/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py ================================================ _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/dcnv2/metafile.yml ================================================ Collections: - Name: Deformable Convolutional Networks v2 Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - Deformable Convolution Paper: URL: https://arxiv.org/abs/1811.11168 Title: "Deformable ConvNets v2: More Deformable, Better Results" README: configs/dcnv2/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/dcn/deform_conv.py#L15 Version: v2.0.0 Models: - Name: faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco In Collection: Deformable Convolutional Networks v2 Config: configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py Metadata: Training Memory (GB): 4.1 inference time (ms/im): - value: 56.82 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200130-d099253b.pth - Name: faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco In Collection: Deformable Convolutional Networks v2 Config: configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py Metadata: Training Memory (GB): 4.2 inference time (ms/im): - value: 57.47 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco_20200130-01262257.pth - Name: faster_rcnn_r50_fpn_mdpool_1x_coco In Collection: Deformable Convolutional Networks v2 Config: configs/dcnv2/faster_rcnn_r50_fpn_mdpool_1x_coco.py Metadata: Training Memory (GB): 5.8 inference time (ms/im): - value: 60.24 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 38.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth - Name: mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco In Collection: Deformable Convolutional Networks v2 Config: configs/dcnv2/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py Metadata: Training Memory (GB): 4.5 inference time (ms/im): - value: 66.23 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.5 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 37.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200203-ad97591f.pth - Name: mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco In Collection: Deformable Convolutional Networks v2 Config: configs/dcnv2/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py Metadata: Training Memory (GB): 3.1 
Training Techniques: - SGD with Momentum - Weight Decay - Mixed Precision Training Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.0 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 37.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434-cf8fefa5.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/ddod/ddod_r50_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='DDOD', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_output', num_outs=5), bbox_head=dict( type='DDODHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2]), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=2.0), loss_iou=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), train_cfg=dict( # the unnamed `assigner` here is the cls assigner assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8), reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5), allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) # `persistent_workers` is only valid when PyTorch>=1.7.0 data = dict(persistent_workers=True) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) ================================================ FILE: DLTA_AI_app/mmdetection/configs/ddod/metafile.yml ================================================ Collections: - Name: DDOD Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - DDOD - FPN - ResNet Paper: URL: https://arxiv.org/pdf/2107.02963.pdf Title: 'Disentangle Your Dense Object Detector' README: configs/ddod/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.25.0/mmdet/models/detectors/ddod.py#L6 Version: v2.25.0 Models: - Name: ddod_r50_fpn_1x_coco In Collection: DDOD Config: configs/ddod/ddod_r50_fpn_1x_coco.py Metadata: Training Memory (GB): 3.4 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/ddod/ddod_r50_fpn_1x_coco/ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/deepfashion.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( roi_head=dict( bbox_head=dict(num_classes=15), 
mask_head=dict(num_classes=15))) # runtime settings runner = dict(type='EpochBasedRunner', max_epochs=15) ================================================ FILE: DLTA_AI_app/mmdetection/configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py ================================================ _base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' ] model = dict( type='DeformableDETR', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='ChannelMapper', in_channels=[512, 1024, 2048], kernel_size=1, out_channels=256, act_cfg=None, norm_cfg=dict(type='GN', num_groups=32), num_outs=4), bbox_head=dict( type='DeformableDETRHead', num_query=300, num_classes=80, in_channels=2048, sync_cls_avg_factor=True, as_two_stage=False, transformer=dict( type='DeformableDetrTransformer', encoder=dict( type='DetrTransformerEncoder', num_layers=6, transformerlayers=dict( type='BaseTransformerLayer', attn_cfgs=dict( type='MultiScaleDeformableAttention', embed_dims=256), feedforward_channels=1024, ffn_dropout=0.1, operation_order=('self_attn', 'norm', 'ffn', 'norm'))), decoder=dict( type='DeformableDetrTransformerDecoder', num_layers=6, return_intermediate=True, transformerlayers=dict( type='DetrTransformerDecoderLayer', attn_cfgs=[ dict( type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1), dict( type='MultiScaleDeformableAttention', embed_dims=256) ], feedforward_channels=1024, ffn_dropout=0.1, operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm')))), positional_encoding=dict( type='SinePositionalEncoding', num_feats=128, normalize=True, offset=-0.5), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0), loss_bbox=dict(type='L1Loss', loss_weight=5.0), loss_iou=dict(type='GIoULoss', loss_weight=2.0)), # training and testing settings train_cfg=dict( assigner=dict( type='HungarianAssigner', cls_cost=dict(type='FocalLossCost', weight=2.0), reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))), test_cfg=dict(max_per_img=100)) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) # train_pipeline, NOTE the img_scale and the Pad's size_divisor are different # from the default setting in mmdet. 
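# A rough sketch (mirroring mmcv.rescale_size, for illustration) of what
# `multiscale_mode='value'` with `keep_ratio=True` does in the Resize steps
# below: one (short_edge, long_edge_cap) pair is sampled uniformly from
# img_scale, then a single scale factor is chosen so that neither cap is
# exceeded:
#
#   import random
#   scale = random.choice([(480, 1333), (512, 1333), (800, 1333)])  # subset
#   factor = min(max(scale) / max(h, w), min(scale) / min(h, w))
#   new_w, new_h = int(w * factor + 0.5), int(h * factor + 0.5)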
train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='RandomFlip', flip_ratio=0.5), dict( type='AutoAugment', policies=[ [ dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', keep_ratio=True) ], [ dict( type='Resize', # The aspect ratio of all images in the train dataset is < 7, # following the original impl img_scale=[(400, 4200), (500, 4200), (600, 4200)], multiscale_mode='value', keep_ratio=True), dict( type='RandomCrop', crop_type='absolute_range', crop_size=(384, 600), allow_negative_crop=True), dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', override=True, keep_ratio=True) ] ]), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=1), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] # test_pipeline, NOTE the Pad's size_divisor is different from the default # setting (size_divisor=32). It has little effect on the performance # whether we use the default setting or size_divisor=1. test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=1), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']) ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict(filter_empty_gt=False, pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict( type='AdamW', lr=2e-4, weight_decay=0.0001, paramwise_cfg=dict( custom_keys={ 'backbone': dict(lr_mult=0.1), 'sampling_offsets': dict(lr_mult=0.1), 'reference_points': dict(lr_mult=0.1) })) optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) # learning policy lr_config = dict(policy='step', step=[40]) runner = dict(type='EpochBasedRunner', max_epochs=50) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. 
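# (Illustration of the linear scaling rule that mmdetection applies when
# training is launched with the `--auto-scale-lr` flag; a sketch, assuming
# the 2.x behaviour of tools/train.py:
#   actual_batch_size = num_gpus * samples_per_gpu
#   scaled_lr = base_lr * actual_batch_size / base_batch_size
# e.g. this config on 8 GPUs x 2 samples: 2e-4 * 16 / 32 = 1e-4.)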
# base_batch_size = (16 GPUs) x (2 samples per GPU) auto_scale_lr = dict(base_batch_size=32) ================================================ FILE: DLTA_AI_app/mmdetection/configs/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco.py ================================================ _base_ = 'deformable_detr_r50_16x2_50e_coco.py' model = dict(bbox_head=dict(with_box_refine=True)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco.py ================================================ _base_ = 'deformable_detr_refine_r50_16x2_50e_coco.py' model = dict(bbox_head=dict(as_two_stage=True)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/deformable_detr/metafile.yml ================================================ Collections: - Name: Deformable DETR Metadata: Training Data: COCO Training Techniques: - AdamW - Multi Scale Train - Gradient Clip Training Resources: 8x V100 GPUs Architecture: - ResNet - Transformer Paper: URL: https://openreview.net/forum?id=gZ9hCDWe6ke Title: 'Deformable DETR: Deformable Transformers for End-to-End Object Detection' README: configs/deformable_detr/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/detectors/deformable_detr.py#L6 Version: v2.12.0 Models: - Name: deformable_detr_r50_16x2_50e_coco In Collection: Deformable DETR Config: configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py Metadata: Epochs: 50 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 44.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_r50_16x2_50e_coco/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth - Name: deformable_detr_refine_r50_16x2_50e_coco In Collection: Deformable DETR Config: configs/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco.py Metadata: Epochs: 50 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco/deformable_detr_refine_r50_16x2_50e_coco_20210419_220503-5f5dff21.pth - Name: deformable_detr_twostage_refine_r50_16x2_50e_coco In Collection: Deformable DETR Config: configs/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco.py Metadata: Epochs: 50 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco/deformable_detr_twostage_refine_r50_16x2_50e_coco_20210419_220613-9d28ab72.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py ================================================ _base_ = [ '../_base_/models/cascade_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( type='DetectoRS_ResNet', conv_cfg=dict(type='ConvAWS'), output_img=True), neck=dict( type='RFP', rfp_steps=2, aspp_out_channels=64, aspp_dilations=(1, 3, 6, 1), rfp_backbone=dict( rfp_inplanes=256, type='DetectoRS_ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, conv_cfg=dict(type='ConvAWS'), pretrained='torchvision://resnet50', style='pytorch'))) ================================================ FILE: 
DLTA_AI_app/mmdetection/configs/detectors/cascade_rcnn_r50_sac_1x_coco.py ================================================ _base_ = [ '../_base_/models/cascade_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( type='DetectoRS_ResNet', conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py ================================================ _base_ = [ '../_base_/models/cascade_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( type='DetectoRS_ResNet', conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True), output_img=True), neck=dict( type='RFP', rfp_steps=2, aspp_out_channels=64, aspp_dilations=(1, 3, 6, 1), rfp_backbone=dict( rfp_inplanes=256, type='DetectoRS_ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True), pretrained='torchvision://resnet50', style='pytorch'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/detectors/detectors_htc_r101_20e_coco.py ================================================ _base_ = '../htc/htc_r101_fpn_20e_coco.py' model = dict( backbone=dict( type='DetectoRS_ResNet', conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True), output_img=True), neck=dict( type='RFP', rfp_steps=2, aspp_out_channels=64, aspp_dilations=(1, 3, 6, 1), rfp_backbone=dict( rfp_inplanes=256, type='DetectoRS_ResNet', depth=101, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True), pretrained='torchvision://resnet101', style='pytorch'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/detectors/detectors_htc_r50_1x_coco.py ================================================ _base_ = '../htc/htc_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='DetectoRS_ResNet', conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True), output_img=True), neck=dict( type='RFP', rfp_steps=2, aspp_out_channels=64, aspp_dilations=(1, 3, 6, 1), rfp_backbone=dict( rfp_inplanes=256, type='DetectoRS_ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True), pretrained='torchvision://resnet50', style='pytorch'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/detectors/htc_r50_rfp_1x_coco.py ================================================ _base_ = '../htc/htc_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='DetectoRS_ResNet', conv_cfg=dict(type='ConvAWS'), output_img=True), neck=dict( type='RFP', rfp_steps=2, aspp_out_channels=64, aspp_dilations=(1, 3, 6, 1), rfp_backbone=dict( 
rfp_inplanes=256, type='DetectoRS_ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, conv_cfg=dict(type='ConvAWS'), pretrained='torchvision://resnet50', style='pytorch'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/detectors/htc_r50_sac_1x_coco.py ================================================ _base_ = '../htc/htc_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='DetectoRS_ResNet', conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/detectors/metafile.yml ================================================ Collections: - Name: DetectoRS Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - ASPP - FPN - RFP - RPN - ResNet - RoIAlign - SAC Paper: URL: https://arxiv.org/abs/2006.02334 Title: 'DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution' README: configs/detectors/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/backbones/detectors_resnet.py#L205 Version: v2.2.0 Models: - Name: cascade_rcnn_r50_rfp_1x_coco In Collection: DetectoRS Config: configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py Metadata: Training Memory (GB): 7.5 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 44.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_rfp_1x_coco/cascade_rcnn_r50_rfp_1x_coco-8cf51bfd.pth - Name: cascade_rcnn_r50_sac_1x_coco In Collection: DetectoRS Config: configs/detectors/cascade_rcnn_r50_sac_1x_coco.py Metadata: Training Memory (GB): 5.6 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 45.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_sac_1x_coco/cascade_rcnn_r50_sac_1x_coco-24bfda62.pth - Name: detectors_cascade_rcnn_r50_1x_coco In Collection: DetectoRS Config: configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py Metadata: Training Memory (GB): 9.9 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 47.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_cascade_rcnn_r50_1x_coco/detectors_cascade_rcnn_r50_1x_coco-32a10ba0.pth - Name: htc_r50_rfp_1x_coco In Collection: DetectoRS Config: configs/detectors/htc_r50_rfp_1x_coco.py Metadata: Training Memory (GB): 11.2 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.6 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 40.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_rfp_1x_coco/htc_r50_rfp_1x_coco-8ff87c51.pth - Name: htc_r50_sac_1x_coco In Collection: DetectoRS Config: configs/detectors/htc_r50_sac_1x_coco.py Metadata: Training Memory (GB): 9.3 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.4 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 40.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_sac_1x_coco/htc_r50_sac_1x_coco-bfa60c54.pth - Name: detectors_htc_r50_1x_coco In Collection: DetectoRS Config: configs/detectors/detectors_htc_r50_1x_coco.py Metadata: Training Memory (GB): 13.6 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 49.1 - Task: Instance Segmentation 
Dataset: COCO Metrics: mask AP: 42.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r50_1x_coco/detectors_htc_r50_1x_coco-329b1453.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/detr/detr_r50_8x2_150e_coco.py ================================================ _base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' ] model = dict( type='DETR', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(3, ), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), bbox_head=dict( type='DETRHead', num_classes=80, in_channels=2048, transformer=dict( type='Transformer', encoder=dict( type='DetrTransformerEncoder', num_layers=6, transformerlayers=dict( type='BaseTransformerLayer', attn_cfgs=[ dict( type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1) ], feedforward_channels=2048, ffn_dropout=0.1, operation_order=('self_attn', 'norm', 'ffn', 'norm'))), decoder=dict( type='DetrTransformerDecoder', return_intermediate=True, num_layers=6, transformerlayers=dict( type='DetrTransformerDecoderLayer', attn_cfgs=dict( type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1), feedforward_channels=2048, ffn_dropout=0.1, operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm')), )), positional_encoding=dict( type='SinePositionalEncoding', num_feats=128, normalize=True), loss_cls=dict( type='CrossEntropyLoss', bg_cls_weight=0.1, use_sigmoid=False, loss_weight=1.0, class_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=5.0), loss_iou=dict(type='GIoULoss', loss_weight=2.0)), # training and testing settings train_cfg=dict( assigner=dict( type='HungarianAssigner', cls_cost=dict(type='ClassificationCost', weight=1.), reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))), test_cfg=dict(max_per_img=100)) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) # train_pipeline, NOTE the img_scale and the Pad's size_divisor are different # from the default setting in mmdet. train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='RandomFlip', flip_ratio=0.5), dict( type='AutoAugment', policies=[[ dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', keep_ratio=True) ], [ dict( type='Resize', img_scale=[(400, 1333), (500, 1333), (600, 1333)], multiscale_mode='value', keep_ratio=True), dict( type='RandomCrop', crop_type='absolute_range', crop_size=(384, 600), allow_negative_crop=True), dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', override=True, keep_ratio=True) ]]), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=1), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] # test_pipeline, NOTE the Pad's size_divisor is different from the default # setting (size_divisor=32). It has little effect on the performance # whether we use the default setting or size_divisor=1. 
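# A sketch of the Pad transform's arithmetic (following mmdet's Pad
# implementation, for illustration): with `size_divisor=n`, each spatial
# dimension is padded up to the next multiple of n,
#
#   import math
#   pad_h, pad_w = math.ceil(h / n) * n, math.ceil(w / n) * n
#
# so size_divisor=1 leaves images unpadded, while the default of 32 can add
# up to 31 pixels per dimension.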
test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=1), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']) ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict( type='AdamW', lr=0.0001, weight_decay=0.0001, paramwise_cfg=dict( custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)})) optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) # learning policy lr_config = dict(policy='step', step=[100]) runner = dict(type='EpochBasedRunner', max_epochs=150) ================================================ FILE: DLTA_AI_app/mmdetection/configs/detr/metafile.yml ================================================ Collections: - Name: DETR Metadata: Training Data: COCO Training Techniques: - AdamW - Multi Scale Train - Gradient Clip Training Resources: 8x V100 GPUs Architecture: - ResNet - Transformer Paper: URL: https://arxiv.org/abs/2005.12872 Title: 'End-to-End Object Detection with Transformers' README: configs/detr/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/detectors/detr.py#L7 Version: v2.7.0 Models: - Name: detr_r50_8x2_150e_coco In Collection: DETR Config: configs/detr/detr_r50_8x2_150e_coco.py Metadata: Training Memory (GB): 7.9 Epochs: 150 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/detr/detr_r50_8x2_150e_coco/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py ================================================ _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( roi_head=dict( type='DoubleHeadRoIHead', reg_roi_scale_factor=1.3, bbox_head=dict( _delete_=True, type='DoubleConvFCBBoxHead', num_convs=4, num_fcs=2, in_channels=256, conv_out_channels=1024, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=2.0)))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/double_heads/metafile.yml ================================================ Collections: - Name: Rethinking Classification and Localization for Object Detection Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - FPN - RPN - ResNet - RoIAlign Paper: URL: https://arxiv.org/pdf/1904.06493 Title: 'Rethinking Classification and Localization for Object Detection' README: configs/double_heads/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/roi_heads/double_roi_head.py#L6 Version: v2.0.0 Models: - Name: dh_faster_rcnn_r50_fpn_1x_coco In Collection: Rethinking Classification and Localization for Object Detection Config: configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py Metadata: Training Memory (GB): 6.8 inference time (ms/im): - value: 105.26 hardware: V100 
backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/dyhead/atss_r50_caffe_fpn_dyhead_1x_coco.py ================================================ _base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='ATSS', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe')), neck=[ dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_output', num_outs=5), dict( type='DyHead', in_channels=256, out_channels=256, num_blocks=6, # disable zero_init_offset to follow official implementation zero_init_offset=False) ], bbox_head=dict( type='ATSSHead', num_classes=80, in_channels=256, pred_kernel_size=1, # follow DyHead official implementation stacked_convs=0, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128], center_offset=0.5), # follow DyHead official implementation bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2]), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=2.0), loss_centerness=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), # training and testing settings train_cfg=dict( assigner=dict(type='ATSSAssigner', topk=9), allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) # use caffe img_norm, size_divisor=128, pillow resize img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=(1333, 800), keep_ratio=True, backend='pillow'), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=128), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True, backend='pillow'), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=128), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py ================================================ _base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = 
dict( type='ATSS', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=[ dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_output', num_outs=5), dict(type='DyHead', in_channels=256, out_channels=256, num_blocks=6) ], bbox_head=dict( type='ATSSHead', num_classes=80, in_channels=256, stacked_convs=0, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2]), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=2.0), loss_centerness=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), # training and testing settings train_cfg=dict( assigner=dict(type='ATSSAssigner', topk=9), allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) ================================================ FILE: DLTA_AI_app/mmdetection/configs/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py ================================================ _base_ = '../_base_/default_runtime.py' pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa model = dict( type='ATSS', backbone=dict( type='SwinTransformer', pretrain_img_size=384, embed_dims=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=12, mlp_ratio=4, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.2, patch_norm=True, out_indices=(1, 2, 3), # Please only add indices that will be used # in the FPN, otherwise some parameters will not be used with_cp=False, convert_weights=True, init_cfg=dict(type='Pretrained', checkpoint=pretrained)), neck=[ dict( type='FPN', in_channels=[384, 768, 1536], out_channels=256, start_level=0, add_extra_convs='on_output', num_outs=5), dict( type='DyHead', in_channels=256, out_channels=256, num_blocks=6, # disable zero_init_offset to follow official implementation zero_init_offset=False) ], bbox_head=dict( type='ATSSHead', num_classes=80, in_channels=256, pred_kernel_size=1, # follow DyHead official implementation stacked_convs=0, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128], center_offset=0.5), # follow DyHead official implementation bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2]), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=2.0), loss_centerness=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), # training and testing settings train_cfg=dict( assigner=dict(type='ATSSAssigner', topk=9), allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) # dataset settings dataset_type = 'CocoDataset' data_root = 
'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(2000, 480), (2000, 1200)], multiscale_mode='range', keep_ratio=True, backend='pillow'), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=128), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(2000, 1200), flip=False, transforms=[ dict(type='Resize', keep_ratio=True, backend='pillow'), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=128), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] # Use RepeatDataset to speed up training data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type='RepeatDataset', times=2, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline)), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric='bbox') # optimizer optimizer_config = dict(grad_clip=None) optimizer = dict( type='AdamW', lr=0.00005, betas=(0.9, 0.999), weight_decay=0.05, paramwise_cfg=dict( custom_keys={ 'absolute_pos_embed': dict(decay_mult=0.), 'relative_position_bias_table': dict(decay_mult=0.), 'norm': dict(decay_mult=0.) 
})) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, step=[8, 11]) runner = dict(type='EpochBasedRunner', max_epochs=12) ================================================ FILE: DLTA_AI_app/mmdetection/configs/dyhead/metafile.yml ================================================ Collections: - Name: DyHead Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 4x T4 GPUs Architecture: - ATSS - DyHead - FPN - ResNet - Deformable Convolution - Pyramid Convolution Paper: URL: https://arxiv.org/abs/2106.08322 Title: 'Dynamic Head: Unifying Object Detection Heads with Attentions' README: configs/dyhead/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/mmdet/models/necks/dyhead.py#L130 Version: v2.22.0 Models: - Name: atss_r50_caffe_fpn_dyhead_1x_coco In Collection: DyHead Config: configs/dyhead/atss_r50_caffe_fpn_dyhead_1x_coco.py Metadata: Training Memory (GB): 5.4 inference time (ms/im): - value: 75.7 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_for_reproduction_1x_coco/atss_r50_fpn_dyhead_for_reproduction_4x4_1x_coco_20220107_213939-162888e6.pth - Name: atss_r50_fpn_dyhead_1x_coco In Collection: DyHead Config: configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py Metadata: Training Memory (GB): 4.9 inference time (ms/im): - value: 73.1 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 43.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_4x4_1x_coco/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth - Name: atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco In Collection: DyHead Config: configs/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py Metadata: Training Memory (GB): 58.4 Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 56.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco_20220509_100315-bc5b6516.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py ================================================ _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( roi_head=dict( type='DynamicRoIHead', bbox_head=dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), train_cfg=dict( rpn_proposal=dict(nms=dict(iou_threshold=0.85)), rcnn=dict( dynamic_rcnn=dict( iou_topk=75, beta_topk=10, update_iter_interval=100, initial_iou=0.4, initial_beta=1.0))), test_cfg=dict(rpn=dict(nms=dict(iou_threshold=0.85)))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/dynamic_rcnn/metafile.yml ================================================ Collections: - Name: Dynamic R-CNN Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay 
Training Resources: 8x V100 GPUs Architecture: - Dynamic R-CNN - FPN - RPN - ResNet - RoIAlign Paper: URL: https://arxiv.org/pdf/2004.06002 Title: 'Dynamic R-CNN: Towards High Quality Object Detection via Dynamic Training' README: configs/dynamic_rcnn/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/roi_heads/dynamic_roi_head.py#L11 Version: v2.2.0 Models: - Name: dynamic_rcnn_r50_fpn_1x_coco In Collection: Dynamic R-CNN Config: configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py Metadata: Training Memory (GB): 3.8 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 38.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x-62a3f276.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/efficientnet/metafile.yml ================================================ Models: - Name: retinanet_effb3_fpn_crop896_8x4_1x_coco In Collection: RetinaNet Config: configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth Paper: URL: https://arxiv.org/abs/1905.11946v5 Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks' README: configs/efficientnet/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.23.0/mmdet/models/backbones/efficientnet.py#L159 Version: v2.23.0 ================================================ FILE: DLTA_AI_app/mmdetection/configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py ================================================ _base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' ] cudnn_benchmark = True norm_cfg = dict(type='BN', requires_grad=True) checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa model = dict( backbone=dict( _delete_=True, type='EfficientNet', arch='b3', drop_path_rate=0.2, out_indices=(3, 4, 5), frozen_stages=0, norm_cfg=dict( type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01), norm_eval=False, init_cfg=dict( type='Pretrained', prefix='backbone', checkpoint=checkpoint)), neck=dict( in_channels=[48, 136, 384], start_level=0, out_channels=256, relu_before_extra_convs=True, no_norm_on_lateral=True, norm_cfg=norm_cfg), bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg), # training and testing settings train_cfg=dict(assigner=dict(neg_iou_thr=0.5))) # dataset settings img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) img_size = (896, 896) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=img_size, ratio_range=(0.8, 1.2), keep_ratio=True), dict(type='RandomCrop', crop_size=img_size), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size=img_size), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=img_size, flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), 
dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size=img_size), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=4, workers_per_gpu=4, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer_config = dict(grad_clip=None) optimizer = dict( type='SGD', lr=0.04, momentum=0.9, weight_decay=0.0001, paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=1000, warmup_ratio=0.1, step=[8, 11]) # runtime settings runner = dict(type='EpochBasedRunner', max_epochs=12) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (4 samples per GPU) auto_scale_lr = dict(base_batch_size=32) ================================================ FILE: DLTA_AI_app/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py ================================================ _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict(plugins=[ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='0010', kv_stride=2), stages=(False, False, True, True), position='after_conv2') ])) ================================================ FILE: DLTA_AI_app/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py ================================================ _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( plugins=[ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='0010', kv_stride=2), stages=(False, False, True, True), position='after_conv2') ], dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py ================================================ _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict(plugins=[ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='1111', kv_stride=2), stages=(False, False, True, True), position='after_conv2') ])) ================================================ FILE: DLTA_AI_app/mmdetection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py ================================================ _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( plugins=[ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='1111', kv_stride=2), stages=(False, False, True, True), position='after_conv2') ], dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/empirical_attention/metafile.yml ================================================ Collections: - Name: Empirical Attention Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - Deformable Convolution - FPN - RPN - ResNet - RoIAlign - Spatial Attention Paper: URL: https://arxiv.org/pdf/1904.05873 Title: 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks' README: 
configs/empirical_attention/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/generalized_attention.py#L10 Version: v2.0.0 Models: - Name: faster_rcnn_r50_fpn_attention_1111_1x_coco In Collection: Empirical Attention Config: configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py Metadata: Training Memory (GB): 8.0 inference time (ms/im): - value: 72.46 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth - Name: faster_rcnn_r50_fpn_attention_0010_1x_coco In Collection: Empirical Attention Config: configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py Metadata: Training Memory (GB): 4.2 inference time (ms/im): - value: 54.35 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 39.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco/faster_rcnn_r50_fpn_attention_0010_1x_coco_20200130-7cb0c14d.pth - Name: faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco In Collection: Empirical Attention Config: configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py Metadata: Training Memory (GB): 8.0 inference time (ms/im): - value: 78.74 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco_20200130-8b2523a6.pth - Name: faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco In Collection: Empirical Attention Config: configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py Metadata: Training Memory (GB): 4.2 inference time (ms/im): - value: 58.48 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco_20200130-1a2e831d.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/fast_rcnn/fast_rcnn_r101_caffe_fpn_1x_coco.py ================================================ _base_ = './fast_rcnn_r50_caffe_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/fast_rcnn/fast_rcnn_r101_fpn_1x_coco.py ================================================ _base_ = './fast_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/fast_rcnn/fast_rcnn_r101_fpn_2x_coco.py ================================================ _base_ = './fast_rcnn_r50_fpn_2x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', 
checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/fast_rcnn/fast_rcnn_r50_caffe_fpn_1x_coco.py ================================================ _base_ = './fast_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(type='BN', requires_grad=False), style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe'))) # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadProposals', num_max_proposals=2000), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadProposals', num_max_proposals=None), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='ToTensor', keys=['proposals']), dict( type='ToDataContainer', fields=[dict(key='proposals', stack=False)]), dict(type='Collect', keys=['img', 'proposals']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/models/fast_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadProposals', num_max_proposals=2000), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadProposals', num_max_proposals=None), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='ToTensor', keys=['proposals']), dict( type='ToDataContainer', fields=[dict(key='proposals', stack=False)]), dict(type='Collect', keys=['img', 'proposals']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_train2017.pkl', pipeline=train_pipeline), val=dict( proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl', pipeline=test_pipeline), test=dict( proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl', pipeline=test_pipeline)) ================================================ FILE: 
DLTA_AI_app/mmdetection/configs/fast_rcnn/fast_rcnn_r50_fpn_2x_coco.py ================================================ _base_ = './fast_rcnn_r50_fpn_1x_coco.py' # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py ================================================ _base_ = './faster_rcnn_r50_caffe_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py ================================================ _base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( depth=101, norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe'))) # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(dataset=dict(pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py ================================================ _base_ = './faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py ================================================ _base_ = './faster_rcnn_r50_fpn_2x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco.py ================================================ _base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py ================================================ _base_ = [ '../_base_/models/faster_rcnn_r50_caffe_c4.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 
1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py ================================================ _base_ = './faster_rcnn_r50_caffe_c4_1x_coco.py' # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco.py ================================================ _base_ = [ '../_base_/models/faster_rcnn_r50_caffe_dc5.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) 
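The caffe `mstrain` configs in this stretch differ from their fixed-scale counterparts only in the Resize step: `multiscale_mode='value'` draws one scale from an explicit list (the six-step 640-800 ladder above), while `multiscale_mode='range'` samples the short edge anywhere in the interval between the two listed scales. A minimal sketch of the sampling, assuming the behavior of mmdet 2.x's Resize transform (`sample_scale` is an illustrative helper, not part of the codebase):

import random

def sample_scale(img_scales, multiscale_mode='value'):
    # 'value': pick one of the listed (long, short) pairs, e.g. the
    # six-step ladder [(1333, 640), (1333, 672), ..., (1333, 800)]
    if multiscale_mode == 'value':
        return random.choice(img_scales)
    # 'range': treat the two entries as endpoints and sample each edge
    # uniformly, e.g. [(1333, 640), (1333, 800)] -> short edge in [640, 800]
    if multiscale_mode == 'range':
        longs = [s[0] for s in img_scales]
        shorts = [s[1] for s in img_scales]
        return (random.randint(min(longs), max(longs)),
                random.randint(min(shorts), max(shorts)))
    raise ValueError(f'unknown multiscale_mode: {multiscale_mode}')

# e.g. sample_scale([(1333, 640), (1333, 800)], 'range') -> (1333, 713)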
================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py ================================================ _base_ = [ '../_base_/models/faster_rcnn_r50_caffe_dc5.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py ================================================ _base_ = './faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py' # learning policy lr_config = dict(step=[28, 34]) runner = dict(type='EpochBasedRunner', max_epochs=36) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py ================================================ _base_ = './faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe'))) # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_90k_coco.py ================================================ _base_ = 'faster_rcnn_r50_caffe_fpn_1x_coco.py' # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, step=[60000, 80000]) # Runner type runner = dict(_delete_=True, 
type='IterBasedRunner', max_iters=90000) checkpoint_config = dict(interval=10000) evaluation = dict(interval=10000, metric='bbox') ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py ================================================ _base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' model = dict(roi_head=dict(bbox_head=dict(num_classes=3))) classes = ('person', 'bicycle', 'car') data = dict( train=dict(classes=classes), val=dict(classes=classes), test=dict(classes=classes)) load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth' # noqa ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py ================================================ _base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' model = dict(roi_head=dict(bbox_head=dict(num_classes=1))) classes = ('person', ) data = dict( train=dict(classes=classes), val=dict(classes=classes), test=dict(classes=classes)) load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth' # noqa ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py ================================================ _base_ = './faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe'))) # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py ================================================ _base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' # learning policy lr_config = dict(step=[16, 23]) runner = dict(type='EpochBasedRunner', max_epochs=24) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py ================================================ _base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( 
norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe'))) # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(dataset=dict(pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_90k_coco.py ================================================ _base_ = 'faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, step=[60000, 80000]) # Runner type runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000) checkpoint_config = dict(interval=10000) evaluation = dict(interval=10000, metric='bbox') ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py ================================================ _base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py ================================================ _base_ = './faster_rcnn_r50_fpn_1x_coco.py' model = dict( roi_head=dict( bbox_head=dict( reg_decoded_bbox=True, loss_bbox=dict(type='BoundedIoULoss', loss_weight=10.0)))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_ciou_1x_coco.py ================================================ _base_ = './faster_rcnn_r50_fpn_1x_coco.py' model = dict( roi_head=dict( bbox_head=dict( reg_decoded_bbox=True, loss_bbox=dict(type='CIoULoss', loss_weight=12.0)))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py ================================================ _base_ = './faster_rcnn_r50_fpn_1x_coco.py' # fp16 settings fp16 = dict(loss_scale=512.) 
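In the fp16 config above, `loss_scale=512.` requests static loss scaling. As a hedged sketch of the general recipe (not mmcv's exact Fp16OptimizerHook; `scaled_backward_step` is an illustrative name): the loss is multiplied by the scale before backward so small FP16 gradients do not underflow to zero, and the gradients are divided by the same factor before the (FP32) optimizer step.

def scaled_backward_step(loss, model, optimizer, loss_scale=512.0):
    # Scale the loss up so FP16 gradients stay above the underflow floor.
    optimizer.zero_grad()
    (loss * loss_scale).backward()
    # Unscale the gradients in place before updating the weights.
    for p in model.parameters():
        if p.grad is not None:
            p.grad.div_(loss_scale)
    optimizer.step()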
================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py ================================================ _base_ = './faster_rcnn_r50_fpn_1x_coco.py' model = dict( roi_head=dict( bbox_head=dict( reg_decoded_bbox=True, loss_bbox=dict(type='GIoULoss', loss_weight=10.0)))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py ================================================ _base_ = './faster_rcnn_r50_fpn_1x_coco.py' model = dict( roi_head=dict( bbox_head=dict( reg_decoded_bbox=True, loss_bbox=dict(type='IoULoss', loss_weight=10.0)))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco.py ================================================ _base_ = [ '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' ] ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py ================================================ _base_ = './faster_rcnn_r50_fpn_1x_coco.py' model = dict(train_cfg=dict(rcnn=dict(sampler=dict(type='OHEMSampler')))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_soft_nms_1x_coco.py ================================================ _base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( test_cfg=dict( rcnn=dict( score_thr=0.05, nms=dict(type='soft_nms', iou_threshold=0.5), max_per_img=100))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py ================================================ _base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] checkpoint = 'https://download.pytorch.org/models/resnet50-11ad3fa6.pth' model = dict( backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=checkpoint))) # `lr` and `weight_decay` have been searched to be optimal. 
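# --- annotation (not part of the original config) --------------------
# A hedged sketch of why `_delete_=True` appears in the optimizer dict
# just below: mmcv merges this config into its `_base_` recursively,
# dict by dict, so without the flag the AdamW fields would be merged
# *into* the SGD optimizer inherited from schedule_1x.py and stale keys
# such as `momentum` would survive. `_delete_=True` drops the base dict
# first. `merge_cfg` is an illustrative stand-in, not mmcv's real API:
def merge_cfg(base, child):
    child = dict(child)
    if child.pop('_delete_', False):
        return child  # replace the inherited dict outright
    out = dict(base)
    for k, v in child.items():
        out[k] = merge_cfg(base.get(k, {}), v) if isinstance(v, dict) else v
    return out
# merge_cfg(dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001),
#           dict(_delete_=True, type='AdamW', lr=0.0001))
# -> {'type': 'AdamW', 'lr': 0.0001}
# ----------------------------------------------------------------------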
optimizer = dict( _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.1, paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py ================================================ _base_ = './faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py ================================================ _base_ = './faster_rcnn_r50_fpn_2x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py ================================================ _base_ = [ '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' ] model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py ================================================ _base_ = [ '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' ] model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=8, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) # ResNeXt-101-32x8d model trained with Caffe2 at FB, # so the mean and std need to be changed. 
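# --- annotation (not part of the original config) --------------------
# What the caffe-style img_norm_cfg below amounts to: Caffe2-trained
# weights expect images kept in OpenCV's BGR channel order
# (to_rgb=False), so mean/std are listed BGR-first -- they are the usual
# ImageNet RGB statistics reversed. A hedged NumPy sketch
# (`normalize_bgr` is an illustrative helper, not mmcv's API):
import numpy as np

def normalize_bgr(img_bgr, mean, std, to_rgb=False):
    img = img_bgr.astype(np.float32)
    if to_rgb:
        img = img[..., ::-1]  # flip BGR -> RGB before normalizing
    return (img - np.asarray(mean, np.float32)) / np.asarray(std, np.float32)
# ----------------------------------------------------------------------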
img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], to_rgb=False) # In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], # multiscale_mode='range' train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] # Use RepeatDataset to speed up training data = dict( train=dict(dataset=dict(pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py ================================================ _base_ = './faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py ================================================ _base_ = './faster_rcnn_r50_fpn_2x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py ================================================ _base_ = [ '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' ] model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/faster_rcnn/metafile.yml ================================================ Collections: - Name: Faster R-CNN Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - FPN - RPN - ResNet - RoIPool Paper: URL: https://arxiv.org/abs/1506.01497 Title: "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks" README: configs/faster_rcnn/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/faster_rcnn.py#L6 Version: v2.0.0 Models: - Name: faster_rcnn_r50_caffe_c4_1x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object 
Detection Dataset: COCO Metrics: box AP: 35.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco/faster_rcnn_r50_caffe_c4_1x_coco_20220316_150152-3f885b85.pth - Name: faster_rcnn_r50_caffe_c4_mstrain_1x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 35.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco/faster_rcnn_r50_caffe_c4_mstrain_1x_coco_20220316_150527-db276fed.pth - Name: faster_rcnn_r50_caffe_dc5_1x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 37.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco/faster_rcnn_r50_caffe_dc5_1x_coco_20201030_151909-531f0f43.pth - Name: faster_rcnn_r50_caffe_fpn_1x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py Metadata: Training Memory (GB): 3.8 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 37.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco/faster_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.378_20200504_180032-c5925ee5.pth - Name: faster_rcnn_r50_fpn_1x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py Metadata: Training Memory (GB): 4.0 inference time (ms/im): - value: 46.73 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 37.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth - Name: faster_rcnn_r50_fpn_fp16_1x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py Metadata: Training Memory (GB): 3.4 Training Techniques: - SGD with Momentum - Weight Decay - Mixed Precision Training inference time (ms/im): - value: 34.72 hardware: V100 backend: PyTorch batch size: 1 mode: FP16 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 37.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204-d4dc1471.pth - Name: faster_rcnn_r50_fpn_2x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py Metadata: Training Memory (GB): 4.0 inference time (ms/im): - value: 46.73 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 38.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth - Name: faster_rcnn_r101_caffe_fpn_1x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py Metadata: Training Memory (GB): 5.7 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 39.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco/faster_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.398_20200504_180057-b269e9dd.pth - Name: 
faster_rcnn_r101_fpn_1x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py Metadata: Training Memory (GB): 6.0 inference time (ms/im): - value: 64.1 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 39.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_1x_coco/faster_rcnn_r101_fpn_1x_coco_20200130-f513f705.pth - Name: faster_rcnn_r101_fpn_2x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py Metadata: Training Memory (GB): 6.0 inference time (ms/im): - value: 64.1 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 39.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_2x_coco/faster_rcnn_r101_fpn_2x_coco_bbox_mAP-0.398_20200504_210455-1d2dac9c.pth - Name: faster_rcnn_x101_32x4d_fpn_1x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py Metadata: Training Memory (GB): 7.2 inference time (ms/im): - value: 72.46 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco/faster_rcnn_x101_32x4d_fpn_1x_coco_20200203-cff10310.pth - Name: faster_rcnn_x101_32x4d_fpn_2x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py Metadata: Training Memory (GB): 7.2 inference time (ms/im): - value: 72.46 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco/faster_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.412_20200506_041400-64a12c0b.pth - Name: faster_rcnn_x101_64x4d_fpn_1x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py Metadata: Training Memory (GB): 10.3 inference time (ms/im): - value: 106.38 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth - Name: faster_rcnn_x101_64x4d_fpn_2x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py Metadata: Training Memory (GB): 10.3 inference time (ms/im): - value: 106.38 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco/faster_rcnn_x101_64x4d_fpn_2x_coco_20200512_161033-5961fa95.pth - Name: faster_rcnn_r50_fpn_iou_1x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 37.9 # re-release Weights: 
https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco/faster_rcnn_r50_fpn_iou_1x_coco_20200506_095954-938e81f0.pth - Name: faster_rcnn_r50_fpn_giou_1x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 37.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_giou_1x_coco-0eada910.pth - Name: faster_rcnn_r50_fpn_bounded_iou_1x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 37.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_bounded_iou_1x_coco-98ad993b.pth - Name: faster_rcnn_r50_caffe_dc5_mstrain_1x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 37.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco_20201028_233851-b33d21b9.pth - Name: faster_rcnn_r50_caffe_dc5_mstrain_3x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py Metadata: Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 38.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco_20201028_002107-34a53b2c.pth - Name: faster_rcnn_r50_caffe_fpn_mstrain_2x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py Metadata: Training Memory (GB): 4.3 Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 39.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco_bbox_mAP-0.397_20200504_231813-10b2de58.pth - Name: faster_rcnn_r50_caffe_fpn_mstrain_3x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py Metadata: Training Memory (GB): 3.7 Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 39.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210526_095054-1f77628b.pth - Name: faster_rcnn_r50_fpn_mstrain_3x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco.py Metadata: Training Memory (GB): 3.9 Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco/faster_rcnn_r50_fpn_mstrain_3x_coco_20210524_110822-e10bd31c.pth - Name: faster_rcnn_r101_caffe_fpn_mstrain_3x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py Metadata: Training Memory (GB): 5.6 Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210526_095742-a7ae426d.pth - 
Name: faster_rcnn_r101_fpn_mstrain_3x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco.py Metadata: Training Memory (GB): 5.8 Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco/faster_rcnn_r101_fpn_mstrain_3x_coco_20210524_110822-4d4d2ca8.pth - Name: faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py Metadata: Training Memory (GB): 7.0 Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210524_124151-16b9b260.pth - Name: faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py Metadata: Training Memory (GB): 10.1 Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210604_182954-002e082a.pth - Name: faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py Metadata: Training Memory (GB): 10.0 Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 43.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210524_124528-26c63de6.pth - Name: faster_rcnn_r50_fpn_tnr-pretrain_1x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py Metadata: Training Memory (GB): 4.0 inference time (ms/im): - value: 46.73 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco_20220320_085147-efedfda4.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py ================================================ _base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py' model = dict( backbone=dict( init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe')), bbox_head=dict( norm_on_bbox=True, centerness_on_reg=True, dcn_on_last_conv=False, center_sampling=True, conv_bias=True, loss_bbox=dict(type='GIoULoss', loss_weight=1.0)), # training and testing settings test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6))) # dataset settings img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( 
type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) optimizer_config = dict(_delete_=True, grad_clip=None) lr_config = dict(warmup='linear') ================================================ FILE: DLTA_AI_app/mmdetection/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py ================================================ _base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe')), bbox_head=dict( norm_on_bbox=True, centerness_on_reg=True, dcn_on_last_conv=True, center_sampling=True, conv_bias=True, loss_bbox=dict(type='GIoULoss', loss_weight=1.0)), # training and testing settings test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6))) # dataset settings img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) optimizer_config = dict(_delete_=True, grad_clip=None) lr_config = dict(warmup='linear') ================================================ FILE: DLTA_AI_app/mmdetection/configs/fcos/fcos_center_r50_caffe_fpn_gn-head_1x_coco.py ================================================ _base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' model = dict(bbox_head=dict(center_sampling=True, center_sample_radius=1.5)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco.py ================================================ _base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron/resnet101_caffe'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py ================================================ _base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron/resnet101_caffe'))) img_norm_cfg = dict( mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ 
dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24) ================================================ FILE: DLTA_AI_app/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py ================================================ _base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings model = dict( type='FCOS', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron/resnet50_caffe')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_output', # use P5 num_outs=5, relu_before_extra_convs=True), bbox_head=dict( type='FCOSHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, strides=[8, 16, 32, 64, 128], loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='IoULoss', loss_weight=1.0), loss_centerness=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), # training and testing settings train_cfg=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0, ignore_iof_thr=-1), allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100)) img_norm_cfg = dict( mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict( lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) optimizer_config 
= dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( policy='step', warmup='constant', warmup_iters=500, warmup_ratio=1.0 / 3, step=[8, 11]) runner = dict(type='EpochBasedRunner', max_epochs=12) ================================================ FILE: DLTA_AI_app/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py ================================================ # TODO: Remove this config after benchmarking all related configs _base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py' data = dict(samples_per_gpu=4, workers_per_gpu=4) ================================================ FILE: DLTA_AI_app/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_fp16_1x_bs8x8_coco.py ================================================ _base_ = ['./fcos_r50_caffe_fpn_gn-head_1x_coco.py'] data = dict(samples_per_gpu=8, workers_per_gpu=8) # optimizer optimizer = dict(lr=0.04) fp16 = dict(loss_scale='dynamic') # learning policy # In order to avoid non-convergence in the early stage of # mixed-precision training, the warmup in the lr_config is set to linear, # warmup_iters increases and warmup_ratio decreases. lr_config = dict(warmup='linear', warmup_iters=1000, warmup_ratio=1.0 / 10) ================================================ FILE: DLTA_AI_app/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py ================================================ _base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' img_norm_cfg = dict( mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24) ================================================ FILE: DLTA_AI_app/mmdetection/configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py ================================================ _base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), 

================================================
FILE: DLTA_AI_app/mmdetection/configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py
================================================
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
    lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/fcos/metafile.yml
================================================
Collections:
  - Name: FCOS
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - FPN
        - Group Normalization
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/1904.01355
      Title: 'FCOS: Fully Convolutional One-Stage Object Detection'
    README: configs/fcos/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/fcos.py#L6
      Version: v2.0.0

Models:
  - Name: fcos_r50_caffe_fpn_gn-head_1x_coco
    In Collection: FCOS
    Config: configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py
    Metadata:
      Training Memory (GB): 3.6
      inference time (ms/im):
        - value: 44.05
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 36.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth
  - Name: fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco
    In Collection: FCOS
    Config: configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py
    Metadata:
      Training Memory (GB): 3.7
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth
  - Name: fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco
    In Collection: FCOS
    Config: configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py
    Metadata:
      Training Memory (GB): 3.8
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco-ae4d8b3d.pth
  - Name: fcos_r101_caffe_fpn_gn-head_1x_coco
    In Collection: FCOS
    Config: configs/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco.py
    Metadata:
      Training Memory (GB): 5.5
      inference time (ms/im):
        - value: 57.8
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/fcos_r101_caffe_fpn_gn-head_1x_coco-0e37b982.pth
  - Name: fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco
    In Collection: FCOS
    Config: configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py
    Metadata:
      Training Memory (GB): 2.6
      inference time (ms/im):
        - value: 43.67
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco-d92ceeea.pth
  - Name: fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco
    In Collection: FCOS
    Config: configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py
    Metadata:
      Training Memory (GB): 5.5
      inference time (ms/im):
        - value: 57.8
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth
  - Name: fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco
    In Collection: FCOS
    Config: configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py
    Metadata:
      Training Memory (GB): 10.0
      inference time (ms/im):
        - value: 103.09
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco-ede514a8.pth
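
Each metafile.yml above is machine-readable model-zoo metadata, so the benchmark numbers can be pulled out programmatically. A minimal sketch with PyYAML (assumes `pyyaml` is installed and the working directory is DLTA_AI_app/mmdetection; this is not a file from the repository):

import yaml

with open('configs/fcos/metafile.yml') as f:
    meta = yaml.safe_load(f)

# One line per model: its name and its COCO box AP.
for model in meta['Models']:
    for result in model['Results']:
        if result['Task'] == 'Object Detection':
            print(model['Name'], '->', result['Metrics']['box AP'])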

================================================
FILE: DLTA_AI_app/mmdetection/configs/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco.py
================================================
_base_ = './fovea_r50_fpn_4x4_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')),
    bbox_head=dict(
        with_deform=True,
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py
================================================
_base_ = './fovea_r50_fpn_4x4_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')),
    bbox_head=dict(
        with_deform=True,
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py
================================================
_base_ = './fovea_r50_fpn_4x4_1x_coco.py'
model = dict(
    bbox_head=dict(
        with_deform=True,
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))

================================================
FILE: DLTA_AI_app/mmdetection/configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py
================================================
_base_ = './fovea_r50_fpn_4x4_1x_coco.py'
model = dict(
    bbox_head=dict(
        with_deform=True,
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py
================================================
_base_ = './fovea_r50_fpn_4x4_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py
================================================
_base_ = './fovea_r50_fpn_4x4_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py
================================================
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
    type='FOVEA',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        num_outs=5,
        add_extra_convs='on_input'),
    bbox_head=dict(
        type='FoveaHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        base_edge_list=[16, 32, 64, 128, 256],
        scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
        sigma=0.4,
        with_deform=False,
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=1.50,
            alpha=0.4,
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(),
    test_cfg=dict(
        nms_pre=1000,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.5),
        max_per_img=100))
data = dict(samples_per_gpu=4, workers_per_gpu=4)
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)

================================================
FILE: DLTA_AI_app/mmdetection/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py
================================================
_base_ = './fovea_r50_fpn_4x4_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/foveabox/metafile.yml
================================================
Collections:
  - Name: FoveaBox
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 4x V100 GPUs
      Architecture:
        - FPN
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/1904.03797
      Title: 'FoveaBox: Beyond Anchor-based Object Detector'
    README: configs/foveabox/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/fovea.py#L6
      Version: v2.0.0

Models:
  - Name: fovea_r50_fpn_4x4_1x_coco
    In Collection: FoveaBox
    Config: configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py
    Metadata:
      Training Memory (GB): 5.6
      inference time (ms/im):
        - value: 41.49
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 36.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_1x_coco/fovea_r50_fpn_4x4_1x_coco_20200219-ee4d5303.pth
  - Name: fovea_r50_fpn_4x4_2x_coco
    In Collection: FoveaBox
    Config: configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py
    Metadata:
      Training Memory (GB): 5.6
      inference time (ms/im):
        - value: 41.49
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_2x_coco/fovea_r50_fpn_4x4_2x_coco_20200203-2df792b1.pth
  - Name: fovea_align_r50_fpn_gn-head_4x4_2x_coco
    In Collection: FoveaBox
    Config: configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py
    Metadata:
      Training Memory (GB): 8.1
      inference time (ms/im):
        - value: 51.55
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth
  - Name: fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco
    In Collection: FoveaBox
    Config: configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py
    Metadata:
      Training Memory (GB): 8.1
      inference time (ms/im):
        - value: 54.64
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200205-85ce26cb.pth
  - Name: fovea_r101_fpn_4x4_1x_coco
    In Collection: FoveaBox
    Config: configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py
    Metadata:
      Training Memory (GB): 9.2
      inference time (ms/im):
        - value: 57.47
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_1x_coco/fovea_r101_fpn_4x4_1x_coco_20200219-05e38f1c.pth
  - Name: fovea_r101_fpn_4x4_2x_coco
    In Collection: FoveaBox
    Config: configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py
    Metadata:
      Training Memory (GB): 11.7
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_2x_coco/fovea_r101_fpn_4x4_2x_coco_20200208-02320ea4.pth
  - Name: fovea_align_r101_fpn_gn-head_4x4_2x_coco
    In Collection: FoveaBox
    Config: configs/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco.py
    Metadata:
      Training Memory (GB): 11.7
      inference time (ms/im):
        - value: 68.03
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco/fovea_align_r101_fpn_gn-head_4x4_2x_coco_20200208-c39a027a.pth
  - Name: fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco
    In Collection: FoveaBox
    Config: configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py
    Metadata:
      Training Memory (GB): 11.7
      inference time (ms/im):
        - value: 68.03
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200208-649c5eb6.pth
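
In `fovea_r50_fpn_4x4_1x_coco.py` above, `scale_ranges` works together with `strides`: a ground-truth box whose scale (the square root of its area) falls inside a level's (lower, upper) range is assigned to that pyramid level, and the deliberately overlapping ranges let borderline objects train on two levels at once. A standalone sketch of that assignment rule (my paraphrase of the config values, not code from the repository):

import math

scale_ranges = ((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048))

def matching_levels(w, h):
    """Pyramid levels whose scale range contains a w x h box."""
    scale = math.sqrt(w * h)
    return [i for i, (lo, hi) in enumerate(scale_ranges) if lo <= scale <= hi]

# A 100x90 box (scale ~95) falls in both (32, 128) and (64, 256),
# i.e. it is trained on the stride-16 and stride-32 levels.
print(matching_levels(100, 90))  # [1, 2]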

================================================
FILE: DLTA_AI_app/mmdetection/configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py
================================================
_base_ = 'faster_rcnn_r50_fpg_crop640_50e_coco.py'

norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    neck=dict(out_channels=128, inter_channels=128),
    rpn_head=dict(in_channels=128),
    roi_head=dict(
        bbox_roi_extractor=dict(out_channels=128),
        bbox_head=dict(in_channels=128)))

================================================
FILE: DLTA_AI_app/mmdetection/configs/fpg/faster_rcnn_r50_fpg_crop640_50e_coco.py
================================================
_base_ = 'faster_rcnn_r50_fpn_crop640_50e_coco.py'

norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    neck=dict(
        type='FPG',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        inter_channels=256,
        num_outs=5,
        stack_times=9,
        paths=['bu'] * 9,
        same_down_trans=None,
        same_up_trans=dict(
            type='conv',
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        across_lateral_trans=dict(
            type='conv',
            kernel_size=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        across_down_trans=dict(
            type='interpolation_conv',
            mode='nearest',
            kernel_size=3,
            norm_cfg=norm_cfg,
            order=('act', 'conv', 'norm'),
            inplace=False),
        across_up_trans=None,
        across_skip_trans=dict(
            type='conv',
            kernel_size=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        output_trans=dict(
            type='last_conv',
            kernel_size=3,
            order=('act', 'conv', 'norm'),
            inplace=False),
        norm_cfg=norm_cfg,
        skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()]))

================================================
FILE: DLTA_AI_app/mmdetection/configs/fpg/faster_rcnn_r50_fpn_crop640_50e_coco.py
================================================
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    backbone=dict(norm_cfg=norm_cfg, norm_eval=False),
    neck=dict(norm_cfg=norm_cfg),
    roi_head=dict(bbox_head=dict(norm_cfg=norm_cfg)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=(640, 640),
        ratio_range=(0.8, 1.2),
        keep_ratio=True),
    dict(type='RandomCrop', crop_size=(640, 640)),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=(640, 640)),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(640, 640),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=64),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    lr=0.08,
    momentum=0.9,
    weight_decay=0.0001,
    paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.1,
    step=[30, 40])
# runtime settings
runner = dict(max_epochs=50)
evaluation = dict(interval=2)

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
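
The `auto_scale_lr` block above encodes the linear scaling rule: `lr=0.08` is tuned for a total batch of 64 (8 GPUs x 8 images), and mmdetection rescales it proportionally when the actual batch differs. The arithmetic, as a standalone sketch (function and variable names are mine):

def scaled_lr(base_lr, num_gpus, samples_per_gpu, base_batch_size=64):
    """Linear LR scaling rule behind auto_scale_lr."""
    return base_lr * (num_gpus * samples_per_gpu) / base_batch_size

# Training this config on 4 GPUs instead of 8 halves the batch,
# so the learning rate is halved as well.
print(scaled_lr(0.08, num_gpus=4, samples_per_gpu=8))  # 0.04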

================================================
FILE: DLTA_AI_app/mmdetection/configs/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py
================================================
_base_ = 'mask_rcnn_r50_fpg_crop640_50e_coco.py'

model = dict(
    neck=dict(out_channels=128, inter_channels=128),
    rpn_head=dict(in_channels=128),
    roi_head=dict(
        bbox_roi_extractor=dict(out_channels=128),
        bbox_head=dict(in_channels=128),
        mask_roi_extractor=dict(out_channels=128),
        mask_head=dict(in_channels=128)))

================================================
FILE: DLTA_AI_app/mmdetection/configs/fpg/mask_rcnn_r50_fpg_crop640_50e_coco.py
================================================
_base_ = 'mask_rcnn_r50_fpn_crop640_50e_coco.py'

norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    neck=dict(
        type='FPG',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        inter_channels=256,
        num_outs=5,
        stack_times=9,
        paths=['bu'] * 9,
        same_down_trans=None,
        same_up_trans=dict(
            type='conv',
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        across_lateral_trans=dict(
            type='conv',
            kernel_size=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        across_down_trans=dict(
            type='interpolation_conv',
            mode='nearest',
            kernel_size=3,
            norm_cfg=norm_cfg,
            order=('act', 'conv', 'norm'),
            inplace=False),
        across_up_trans=None,
        across_skip_trans=dict(
            type='conv',
            kernel_size=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        output_trans=dict(
            type='last_conv',
            kernel_size=3,
            order=('act', 'conv', 'norm'),
            inplace=False),
        norm_cfg=norm_cfg,
        skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()]))

================================================
FILE: DLTA_AI_app/mmdetection/configs/fpg/mask_rcnn_r50_fpn_crop640_50e_coco.py
================================================
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    backbone=dict(norm_cfg=norm_cfg, norm_eval=False),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        norm_cfg=norm_cfg,
        num_outs=5),
    roi_head=dict(
        bbox_head=dict(norm_cfg=norm_cfg),
        mask_head=dict(norm_cfg=norm_cfg)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=(640, 640),
        ratio_range=(0.8, 1.2),
        keep_ratio=True),
    dict(type='RandomCrop', crop_size=(640, 640)),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=(640, 640)),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(640, 640),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=64),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    lr=0.08,
    momentum=0.9,
    weight_decay=0.0001,
    paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.1,
    step=[30, 40])
# runtime settings
runner = dict(max_epochs=50)
evaluation = dict(interval=2)

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
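
Both crop640 base configs set `paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)`, i.e. normalization layers are exempted from weight decay (and parameters shared between modules are only registered once). A rough plain-PyTorch equivalent of the `norm_decay_mult=0` part, sketched here as an illustration of the idea rather than mmcv's actual optimizer constructor:

import torch
import torch.nn as nn

def sgd_without_norm_decay(model, lr=0.08, momentum=0.9, weight_decay=1e-4):
    """Put norm-layer parameters in a decay-free parameter group."""
    norm_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
                  nn.SyncBatchNorm, nn.GroupNorm, nn.LayerNorm)
    decay, no_decay = [], []
    for module in model.modules():
        bucket = no_decay if isinstance(module, norm_types) else decay
        bucket.extend(module.parameters(recurse=False))
    return torch.optim.SGD(
        [dict(params=decay, weight_decay=weight_decay),
         dict(params=no_decay, weight_decay=0.0)],
        lr=lr, momentum=momentum)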

================================================
FILE: DLTA_AI_app/mmdetection/configs/fpg/metafile.yml
================================================
Collections:
  - Name: Feature Pyramid Grids
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - Feature Pyramid Grids
    Paper:
      URL: https://arxiv.org/abs/2004.03580
      Title: 'Feature Pyramid Grids'
    README: configs/fpg/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.10.0/mmdet/models/necks/fpg.py#L101
      Version: v2.10.0

Models:
  - Name: faster_rcnn_r50_fpg_crop640_50e_coco
    In Collection: Feature Pyramid Grids
    Config: configs/fpg/faster_rcnn_r50_fpg_crop640_50e_coco.py
    Metadata:
      Training Memory (GB): 20.0
      Epochs: 50
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg_crop640_50e_coco/faster_rcnn_r50_fpg_crop640_50e_coco_20220311_011856-74109f42.pth
  - Name: faster_rcnn_r50_fpg-chn128_crop640_50e_coco
    In Collection: Feature Pyramid Grids
    Config: configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py
    Metadata:
      Training Memory (GB): 11.9
      Epochs: 50
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco/faster_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011857-9376aa9d.pth
  - Name: mask_rcnn_r50_fpg_crop640_50e_coco
    In Collection: Feature Pyramid Grids
    Config: configs/fpg/mask_rcnn_r50_fpg_crop640_50e_coco.py
    Metadata:
      Training Memory (GB): 23.2
      Epochs: 50
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg_crop640_50e_coco/mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857-233b8334.pth
  - Name: mask_rcnn_r50_fpg-chn128_crop640_50e_coco
    In Collection: Feature Pyramid Grids
    Config: configs/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py
    Metadata:
      Training Memory (GB): 15.3
      Epochs: 50
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.7
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco/mask_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011859-043c9b4e.pth
  - Name: retinanet_r50_fpg_crop640_50e_coco
    In Collection: Feature Pyramid Grids
    Config: configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py
    Metadata:
      Training Memory (GB): 20.8
      Epochs: 50
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg_crop640_50e_coco/retinanet_r50_fpg_crop640_50e_coco_20220311_110809-b0bcf5f4.pth
  - Name: retinanet_r50_fpg-chn128_crop640_50e_coco
    In Collection: Feature Pyramid Grids
    Config: configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py
    Metadata:
      Training Memory (GB): 19.9
      Epochs: 50
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco/retinanet_r50_fpg-chn128_crop640_50e_coco_20220313_104829-ee99a686.pth

================================================
FILE: DLTA_AI_app/mmdetection/configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py
================================================
_base_ = 'retinanet_r50_fpg_crop640_50e_coco.py'

model = dict(
    neck=dict(out_channels=128, inter_channels=128),
    bbox_head=dict(in_channels=128))

================================================
FILE: DLTA_AI_app/mmdetection/configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py
================================================
_base_ = '../nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py'

norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    neck=dict(
        _delete_=True,
        type='FPG',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        inter_channels=256,
        num_outs=5,
        add_extra_convs=True,
        start_level=1,
        stack_times=9,
        paths=['bu'] * 9,
        same_down_trans=None,
        same_up_trans=dict(
            type='conv',
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        across_lateral_trans=dict(
            type='conv',
            kernel_size=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        across_down_trans=dict(
            type='interpolation_conv',
            mode='nearest',
            kernel_size=3,
            norm_cfg=norm_cfg,
            order=('act', 'conv', 'norm'),
            inplace=False),
        across_up_trans=None,
        across_skip_trans=dict(
            type='conv',
            kernel_size=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        output_trans=dict(
            type='last_conv',
            kernel_size=3,
            order=('act', 'conv', 'norm'),
            inplace=False),
        norm_cfg=norm_cfg,
        skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()]))
evaluation = dict(interval=2)

================================================
FILE: DLTA_AI_app/mmdetection/configs/free_anchor/metafile.yml
================================================
Collections:
  - Name: FreeAnchor
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - FreeAnchor
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/1909.02466
      Title: 'FreeAnchor: Learning to Match Anchors for Visual Object Detection'
    README: configs/free_anchor/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/dense_heads/free_anchor_retina_head.py#L10
      Version: v2.0.0

Models:
  - Name: retinanet_free_anchor_r50_fpn_1x_coco
    In Collection: FreeAnchor
    Config: configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.9
      inference time (ms/im):
        - value: 54.35
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth
  - Name: retinanet_free_anchor_r101_fpn_1x_coco
    In Collection: FreeAnchor
    Config: configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 6.8
      inference time (ms/im):
        - value: 67.11
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco/retinanet_free_anchor_r101_fpn_1x_coco_20200130-358324e6.pth
  - Name: retinanet_free_anchor_x101_32x4d_fpn_1x_coco
    In Collection: FreeAnchor
    Config: configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 8.1
      inference time (ms/im):
        - value: 90.09
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco/retinanet_free_anchor_x101_32x4d_fpn_1x_coco_20200130-d4846968.pth

================================================
FILE: DLTA_AI_app/mmdetection/configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py
================================================
_base_ = './retinanet_free_anchor_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py
================================================
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
model = dict(
    bbox_head=dict(
        _delete_=True,
        type='FreeAnchorRetinaHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.75)))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))

================================================
FILE: DLTA_AI_app/mmdetection/configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py
================================================
_base_ = './retinanet_free_anchor_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
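
Note the `_delete_=True` keys in the FPG and FreeAnchor overrides above: the FPG neck and the FreeAnchor head replace their inherited counterparts outright, because deep-merging a `FreeAnchorRetinaHead` into the base `RetinaHead` dict would leave stale base keys behind. A toy model of the two merge behaviours (simplified; mmcv's real merge handles more cases):

def merge(base, override):
    """Tiny model of mmcv's config merge for plain dicts."""
    override = dict(override)
    if override.pop('_delete_', False):
        return override                      # discard all inherited keys
    out = dict(base)
    for key, val in override.items():
        if isinstance(val, dict) and isinstance(out.get(key), dict):
            out[key] = merge(out[key], val)  # deep-merge nested dicts
        else:
            out[key] = val
    return out

base_head = dict(type='RetinaHead', octave_base_scale=4,
                 loss_bbox=dict(type='L1Loss', loss_weight=1.0))
child = dict(type='FreeAnchorRetinaHead',
             loss_bbox=dict(type='SmoothL1Loss', beta=0.11))

print(merge(base_head, child))
# -> octave_base_scale and loss_weight leak in from the base
print(merge(base_head, dict(child, _delete_=True)))
# -> exactly the keys written in the child config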

================================================
FILE: DLTA_AI_app/mmdetection/configs/fsaf/fsaf_r101_fpn_1x_coco.py
================================================
_base_ = './fsaf_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/fsaf/fsaf_r50_fpn_1x_coco.py
================================================
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
# model settings
model = dict(
    type='FSAF',
    bbox_head=dict(
        type='FSAFHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        reg_decoded_bbox=True,
        # Only anchor-free branch is implemented. The anchor generator only
        # generates 1 anchor at each feature point, as a substitute of the
        # grid of features.
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=1,
            scales_per_octave=1,
            ratios=[1.0],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(_delete_=True, type='TBLRBBoxCoder', normalizer=4.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0,
            reduction='none'),
        loss_bbox=dict(
            _delete_=True,
            type='IoULoss',
            eps=1e-6,
            loss_weight=1.0,
            reduction='none')),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            _delete_=True,
            type='CenterRegionAssigner',
            pos_scale=0.2,
            neg_scale=0.2,
            min_pos_iof=0.01),
        allowed_border=-1,
        pos_weight=-1,
        debug=False))
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=10, norm_type=2))

================================================
FILE: DLTA_AI_app/mmdetection/configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py
================================================
_base_ = './fsaf_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/fsaf/metafile.yml
================================================
Collections:
  - Name: FSAF
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x Titan-XP GPUs
      Architecture:
        - FPN
        - FSAF
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/1903.00621
      Title: 'Feature Selective Anchor-Free Module for Single-Shot Object Detection'
    README: configs/fsaf/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/detectors/fsaf.py#L6
      Version: v2.1.0

Models:
  - Name: fsaf_r50_fpn_1x_coco
    In Collection: FSAF
    Config: configs/fsaf/fsaf_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 3.15
      inference time (ms/im):
        - value: 76.92
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r50_fpn_1x_coco/fsaf_r50_fpn_1x_coco-94ccc51f.pth
  - Name: fsaf_r101_fpn_1x_coco
    In Collection: FSAF
    Config: configs/fsaf/fsaf_r101_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 5.08
      inference time (ms/im):
        - value: 92.59
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.3 (37.9)
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r101_fpn_1x_coco/fsaf_r101_fpn_1x_coco-9e71098f.pth
  - Name: fsaf_x101_64x4d_fpn_1x_coco
    In Collection: FSAF
    Config: configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 9.38
      inference time (ms/im):
        - value: 178.57
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.4 (41.0)
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_x101_64x4d_fpn_1x_coco/fsaf_x101_64x4d_fpn_1x_coco-e3f6e6fd.pth

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py
================================================
_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py
================================================
_base_ = '../dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py
================================================
_base_ = '../dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py
================================================
_base_ = '../dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 4),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
================================================
_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
================================================
_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 4),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(plugins=[
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            stages=(False, True, True, True),
            position='after_conv3')
    ]))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(plugins=[
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 4),
            stages=(False, True, True, True),
            position='after_conv3')
    ]))
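
The `ContextBlock` plugin inserted `after_conv3` in stages 2-4 is GCNet's global context block: a 1x1-conv/softmax attention pools the feature map into a single C-dimensional context vector, a bottleneck whose width is `ratio` times C (hence the r4/r16 file names) transforms it, and the result is broadcast-added back onto every position. A compact PyTorch sketch of that structure, simplified from the paper (mmcv ships the real `ContextBlock`; this is not repository code):

import torch
import torch.nn as nn

class GlobalContextBlock(nn.Module):
    """Simplified GCNet block: attention pooling + bottleneck + add."""

    def __init__(self, channels, ratio=1 / 16):
        super().__init__()
        hidden = int(channels * ratio)       # r16 -> C/16, r4 -> C/4
        self.mask = nn.Conv2d(channels, 1, kernel_size=1)
        self.transform = nn.Sequential(
            nn.Conv2d(channels, hidden, kernel_size=1),
            nn.LayerNorm([hidden, 1, 1]),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden, channels, kernel_size=1))

    def forward(self, x):
        n, c, h, w = x.shape
        attn = self.mask(x).view(n, 1, h * w).softmax(dim=-1)
        context = torch.bmm(x.view(n, c, h * w),
                            attn.transpose(1, 2)).view(n, c, 1, 1)
        return x + self.transform(context)   # broadcast add fusion

# e.g. the r4 variant on a stage-3 feature map:
out = GlobalContextBlock(1024, ratio=1 / 4)(torch.randn(2, 1024, 50, 68))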

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 4),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(plugins=[
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            stages=(False, True, True, True),
            position='after_conv3')
    ]))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(plugins=[
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 4),
            stages=(False, True, True, True),
            position='after_conv3')
    ]))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 4),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 4),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))
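
The syncbn-backbone variants swap per-GPU BatchNorm for SyncBN so that batch statistics are computed across all GPUs (useful at 2 images per GPU), and `norm_eval=False` lets those statistics keep updating during training. Outside the config system the same switch is a single call in stock PyTorch; `model` below stands for any module containing BN layers (a sketch, not repository code):

import torch.nn as nn

sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
# Every BatchNorm*d layer is replaced by a SyncBatchNorm that reduces
# its statistics over the distributed process group, mirroring
# norm_cfg=dict(type='SyncBN', requires_grad=True) with norm_eval=False.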

================================================
FILE: DLTA_AI_app/mmdetection/configs/gcnet/metafile.yml
================================================
Collections:
  - Name: GCNet
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - Global Context Block
        - FPN
        - RPN
        - ResNet
        - ResNeXt
    Paper:
      URL: https://arxiv.org/abs/1904.11492
      Title: 'GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond'
    README: configs/gcnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/context_block.py#L13
      Version: v2.0.0

Models:
  - Name: mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 5.0
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.7
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 35.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco_20200515_211915-187da160.pth
  - Name: mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 5.1
      inference time (ms/im):
        - value: 66.67
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.9
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco_20200204-17235656.pth
  - Name: mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 7.6
      inference time (ms/im):
        - value: 87.72
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.3
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco_20200205-e58ae947.pth
  - Name: mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 7.8
      inference time (ms/im):
        - value: 86.21
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco_20200206-af22dc9d.pth
  - Name: mask_rcnn_r50_fpn_syncbn-backbone_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py
    Metadata:
      Training Memory (GB): 4.4
      inference time (ms/im):
        - value: 60.24
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.4
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 34.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco_20200202-bb3eb55c.pth
  - Name: mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 5.0
      inference time (ms/im):
        - value: 64.52
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.4
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth
  - Name: mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 5.1
      inference time (ms/im):
        - value: 66.23
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.7
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202-50b90e5c.pth
  - Name: mask_rcnn_r101_fpn_syncbn-backbone_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py
    Metadata:
      Training Memory (GB): 6.4
      inference time (ms/im):
        - value: 75.19
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.5
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco_20200210-81658c8a.pth
  - Name: mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 7.6
      inference time (ms/im):
        - value: 83.33
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200207-945e77ca.pth
  - Name: mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 7.8
      inference time (ms/im):
        - value: 84.75
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206-8407a3f0.pth
  - Name: mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py
    Metadata:
      Training Memory (GB): 7.6
      inference time (ms/im):
        - value: 88.5
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.4
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200211-7584841c.pth
  - Name: mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 8.8
      inference time (ms/im):
        - value: 102.04
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.5
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211-cbed3d2c.pth
  - Name: mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 9.0
      inference time (ms/im):
        - value: 103.09
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.9
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200212-68164964.pth
  - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py
    Metadata:
      Training Memory (GB): 9.2
      inference time (ms/im):
        - value: 119.05
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.7
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200310-d5ad2a5e.pth
  - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 10.3
      inference time (ms/im):
        - value: 129.87
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211-10bf2463.pth
  - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 10.6
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.4
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 40.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200703_180653-ed035291.pth
  - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 47.5
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 40.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco_20210615_211019-abbc39ea.pth
  - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 48.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 41.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco_20210615_215648-44aa598a.pth
  - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco
    In Collection: GCNet
    Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 47.9
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 41.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco_20210615_161851-720338ec.pth
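
Every `Weights:` URL in these metafiles points at a self-contained checkpoint. A quick sketch of downloading one and peeking inside with stock PyTorch (the URL is copied from the entry above; mmdetection checkpoints carry a `meta` dict beside the `state_dict`):

import torch

url = ('https://download.openmmlab.com/mmdetection/v2.0/gcnet/'
       'mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco/'
       'mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco_20200515_211915-187da160.pth')
ckpt = torch.hub.load_state_dict_from_url(url, map_location='cpu')
print(ckpt.get('meta', {}).get('mmdet_version'))  # provenance info
print(len(ckpt['state_dict']))                    # number of weight tensors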
================================================
FILE: DLTA_AI_app/mmdetection/configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py
================================================
_base_ = './gfl_r50_fpn_mstrain_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py
================================================
_base_ = './gfl_r50_fpn_mstrain_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gfl/gfl_r50_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='GFL',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='GFLHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        loss_cls=dict(
            type='QualityFocalLoss',
            use_sigmoid=True,
            beta=2.0,
            loss_weight=1.0),
        loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
        reg_max=16,
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)

================================================
FILE: DLTA_AI_app/mmdetection/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py
================================================
_base_ = './gfl_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
# multi-scale training
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 480), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
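The r101 and dconv variants above override only the backbone; the GFL head, neck, schedule, and pipeline are all inherited through `_base_`. A sketch of inspecting the merged result, assuming the mmcv 1.x Config API that this mmdetection version uses (run from the mmdetection root):

from mmcv import Config

# Child keys are merged into the base recursively, so the r101 config only
# restates the backbone; everything else comes from the r50 base config.
cfg = Config.fromfile('configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py')
print(cfg.model.backbone.depth)   # 101 (overridden by the child config)
print(cfg.model.bbox_head.type)   # 'GFLHead' (inherited from the base)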
================================================
FILE: DLTA_AI_app/mmdetection/configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py
================================================
_base_ = './gfl_r50_fpn_mstrain_2x_coco.py'
model = dict(
    type='GFL',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, False, True, True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco.py
================================================
_base_ = './gfl_r50_fpn_mstrain_2x_coco.py'
model = dict(
    type='GFL',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
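Any of these configs can be paired with the checkpoint URLs from the metafile below. A minimal inference sketch using mmdetection 2.x's high-level API ('demo.jpg' is a placeholder image path):

from mmdet.apis import init_detector, inference_detector

config = 'configs/gfl/gfl_r50_fpn_1x_coco.py'
checkpoint = ('https://download.openmmlab.com/mmdetection/v2.0/gfl/'
              'gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth')
model = init_detector(config, checkpoint, device='cuda:0')
result = inference_detector(model, 'demo.jpg')  # per-class list of Nx5 boxes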
================================================
FILE: DLTA_AI_app/mmdetection/configs/gfl/metafile.yml
================================================
Collections:
  - Name: Generalized Focal Loss
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - Generalized Focal Loss
        - FPN
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/2006.04388
      Title: 'Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection'
    README: configs/gfl/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/detectors/gfl.py#L6
      Version: v2.2.0

Models:
  - Name: gfl_r50_fpn_1x_coco
    In Collection: Generalized Focal Loss
    Config: configs/gfl/gfl_r50_fpn_1x_coco.py
    Metadata:
      inference time (ms/im):
        - value: 51.28
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth
  - Name: gfl_r50_fpn_mstrain_2x_coco
    In Collection: Generalized Focal Loss
    Config: configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py
    Metadata:
      inference time (ms/im):
        - value: 51.28
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_mstrain_2x_coco/gfl_r50_fpn_mstrain_2x_coco_20200629_213802-37bb1edc.pth
  - Name: gfl_r101_fpn_mstrain_2x_coco
    In Collection: Generalized Focal Loss
    Config: configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py
    Metadata:
      inference time (ms/im):
        - value: 68.03
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth
  - Name: gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco
    In Collection: Generalized Focal Loss
    Config: configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py
    Metadata:
      inference time (ms/im):
        - value: 77.52
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 47.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth
  - Name: gfl_x101_32x4d_fpn_mstrain_2x_coco
    In Collection: Generalized Focal Loss
    Config: configs/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco.py
    Metadata:
      inference time (ms/im):
        - value: 82.64
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 45.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco/gfl_x101_32x4d_fpn_mstrain_2x_coco_20200630_102002-50c1ffdb.pth
  - Name: gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco
    In Collection: Generalized Focal Loss
    Config: configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py
    Metadata:
      inference time (ms/im):
        - value: 93.46
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 48.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco_20200630_102002-14a2bf25.pth

================================================
FILE: DLTA_AI_app/mmdetection/configs/ghm/metafile.yml
================================================
Collections:
  - Name: GHM
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - GHM-C
        - GHM-R
        - FPN
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/1811.05181
      Title: 'Gradient Harmonized Single-stage Detector'
    README: configs/ghm/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/losses/ghm_loss.py#L21
      Version: v2.0.0

Models:
  - Name: retinanet_ghm_r50_fpn_1x_coco
    In Collection: GHM
    Config: configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.0
      inference time (ms/im):
        - value: 303.03
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r50_fpn_1x_coco/retinanet_ghm_r50_fpn_1x_coco_20200130-a437fda3.pth
  - Name: retinanet_ghm_r101_fpn_1x_coco
    In Collection: GHM
    Config: configs/ghm/retinanet_ghm_r101_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 6.0
      inference time (ms/im):
        - value: 227.27
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r101_fpn_1x_coco/retinanet_ghm_r101_fpn_1x_coco_20200130-c148ee8f.pth
  - Name: retinanet_ghm_x101_32x4d_fpn_1x_coco
    In Collection: GHM
    Config: configs/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 7.2
      inference time (ms/im):
        - value: 196.08
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco/retinanet_ghm_x101_32x4d_fpn_1x_coco_20200131-e4333bd0.pth
  - Name: retinanet_ghm_x101_64x4d_fpn_1x_coco
    In Collection: GHM
    Config: configs/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 10.3
      inference time (ms/im):
        - value: 192.31
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco/retinanet_ghm_x101_64x4d_fpn_1x_coco_20200131-dd381cef.pth
================================================
FILE: DLTA_AI_app/mmdetection/configs/ghm/retinanet_ghm_r101_fpn_1x_coco.py
================================================
_base_ = './retinanet_ghm_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py
================================================
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
model = dict(
    bbox_head=dict(
        loss_cls=dict(
            _delete_=True,
            type='GHMC',
            bins=30,
            momentum=0.75,
            use_sigmoid=True,
            loss_weight=1.0),
        loss_bbox=dict(
            _delete_=True,
            type='GHMR',
            mu=0.02,
            bins=10,
            momentum=0.7,
            loss_weight=10.0)))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))

================================================
FILE: DLTA_AI_app/mmdetection/configs/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco.py
================================================
_base_ = './retinanet_ghm_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco.py
================================================
_base_ = './retinanet_ghm_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
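Note the `_delete_=True` flags in retinanet_ghm_r50_fpn_1x_coco.py above: without them, the GHMC/GHMR dicts would be merged into the inherited FocalLoss and SmoothL1Loss settings rather than replacing them. A toy sketch of the merge rule (an illustration of the semantics, not mmcv's actual implementation):

def merge(base, child):
    # `_delete_=True` makes the child dict replace the base dict wholesale
    # instead of being merged into it key by key.
    if child.pop('_delete_', False):
        return child
    out = dict(base)
    for k, v in child.items():
        if isinstance(v, dict) and isinstance(base.get(k), dict):
            out[k] = merge(base[k], v)
        else:
            out[k] = v
    return out

base_loss = {'type': 'FocalLoss', 'use_sigmoid': True, 'gamma': 2.0}
ghm_loss = merge(base_loss, {'_delete_': True, 'type': 'GHMC', 'bins': 30})
print(ghm_loss)  # {'type': 'GHMC', 'bins': 30} -- no FocalLoss leftovers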
================================================
FILE: DLTA_AI_app/mmdetection/configs/gn/mask_rcnn_r101_fpn_gn-all_2x_coco.py
================================================
_base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron/resnet101_gn')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gn/mask_rcnn_r101_fpn_gn-all_3x_coco.py
================================================
_base_ = './mask_rcnn_r101_fpn_gn-all_2x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)

================================================
FILE: DLTA_AI_app/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    backbone=dict(
        norm_cfg=norm_cfg,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron/resnet50_gn')),
    neck=dict(norm_cfg=norm_cfg),
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            norm_cfg=norm_cfg),
        mask_head=dict(norm_cfg=norm_cfg)))
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py
================================================
_base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)

================================================
FILE: DLTA_AI_app/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    backbone=dict(
        norm_cfg=norm_cfg,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://contrib/resnet50_gn')),
    neck=dict(norm_cfg=norm_cfg),
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            norm_cfg=norm_cfg),
        mask_head=dict(norm_cfg=norm_cfg)))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py
================================================
_base_ = './mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
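norm_cfg=dict(type='GN', num_groups=32, ...) in the gn configs above is standard group normalization: statistics are computed within fixed channel groups, so the layer behaves identically with the 1-2 images per GPU typical of detection training. The PyTorch equivalent for a 256-channel layer, as a quick sketch:

import torch
import torch.nn as nn

gn = nn.GroupNorm(num_groups=32, num_channels=256)  # 8 channels per group
x = torch.randn(2, 256, 50, 50)
y = gn(x)  # normalised per group of 8 channels, independent of batch size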
================================================
FILE: DLTA_AI_app/mmdetection/configs/gn/metafile.yml
================================================
Collections:
  - Name: Group Normalization
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - Group Normalization
    Paper:
      URL: https://arxiv.org/abs/1803.08494
      Title: 'Group Normalization'
    README: configs/gn/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py
      Version: v2.0.0

Models:
  - Name: mask_rcnn_r50_fpn_gn-all_2x_coco
    In Collection: Group Normalization
    Config: configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py
    Metadata:
      Training Memory (GB): 7.1
      inference time (ms/im):
        - value: 90.91
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_2x_coco/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth
  - Name: mask_rcnn_r50_fpn_gn-all_3x_coco
    In Collection: Group Normalization
    Config: configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py
    Metadata:
      Training Memory (GB): 7.1
      inference time (ms/im):
        - value: 90.91
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.5
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_3x_coco/mask_rcnn_r50_fpn_gn-all_3x_coco_20200214-8b23b1e5.pth
  - Name: mask_rcnn_r101_fpn_gn-all_2x_coco
    In Collection: Group Normalization
    Config: configs/gn/mask_rcnn_r101_fpn_gn-all_2x_coco.py
    Metadata:
      Training Memory (GB): 9.9
      inference time (ms/im):
        - value: 111.11
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.9
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_2x_coco/mask_rcnn_r101_fpn_gn-all_2x_coco_20200205-d96b1b50.pth
  - Name: mask_rcnn_r101_fpn_gn-all_3x_coco
    In Collection: Group Normalization
    Config: configs/gn/mask_rcnn_r101_fpn_gn-all_3x_coco.py
    Metadata:
      Training Memory (GB): 9.9
      inference time (ms/im):
        - value: 111.11
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_3x_coco/mask_rcnn_r101_fpn_gn-all_3x_coco_20200513_181609-0df864f4.pth
  - Name: mask_rcnn_r50_fpn_gn-all_contrib_2x_coco
    In Collection: Group Normalization
    Config: configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py
    Metadata:
      Training Memory (GB): 7.1
      inference time (ms/im):
        - value: 91.74
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco_20200207-20d3e849.pth
  - Name: mask_rcnn_r50_fpn_gn-all_contrib_3x_coco
    In Collection: Group Normalization
    Config: configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py
    Metadata:
      Training Memory (GB): 7.1
      inference time (ms/im):
        - value: 91.74
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco_20200225-542aefbc.pth

================================================
FILE: DLTA_AI_app/mmdetection/configs/gn+ws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py
================================================
_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://jhu/resnet101_gn_ws')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py
================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    backbone=dict(
        conv_cfg=conv_cfg,
        norm_cfg=norm_cfg,
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')),
    neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg),
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg)))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gn+ws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py
================================================
_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch',
        conv_cfg=conv_cfg,
        norm_cfg=norm_cfg,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gn+ws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py
================================================
_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=50,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch',
        conv_cfg=conv_cfg,
        norm_cfg=norm_cfg,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://jhu/resnext50_32x4d_gn_ws')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py
================================================
_base_ = './mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py'
# learning policy
lr_config = dict(step=[20, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py
================================================
_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://jhu/resnet101_gn_ws')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py
================================================
_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py'
# learning policy
lr_config = dict(step=[20, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    backbone=dict(
        conv_cfg=conv_cfg,
        norm_cfg=norm_cfg,
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')),
    neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg),
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg),
        mask_head=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg)))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
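conv_cfg=dict(type='ConvWS') in these configs swaps every convolution for a weight-standardized one: the kernel is normalised to zero mean and unit standard deviation per output channel before the convolution is applied (cf. arXiv:1903.10520). A minimal PyTorch sketch of the idea; mmcv's ConvWS2d is the production implementation:

import torch.nn as nn
import torch.nn.functional as F

class ConvWS2d(nn.Conv2d):
    def forward(self, x):
        # Standardize the kernel per output channel, then convolve as usual.
        c_out = self.weight.size(0)
        flat = self.weight.view(c_out, -1)
        mean = flat.mean(dim=1).view(c_out, 1, 1, 1)
        std = flat.std(dim=1).view(c_out, 1, 1, 1) + 1e-5
        weight = (self.weight - mean) / std
        return F.conv2d(x, weight, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)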
================================================
FILE: DLTA_AI_app/mmdetection/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py
================================================
_base_ = './mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py'
# learning policy
lr_config = dict(step=[20, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py
================================================
_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py'
# model settings
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch',
        conv_cfg=conv_cfg,
        norm_cfg=norm_cfg,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py
================================================
_base_ = './mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py'
# learning policy
lr_config = dict(step=[20, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py
================================================
_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py'
# model settings
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=50,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch',
        conv_cfg=conv_cfg,
        norm_cfg=norm_cfg,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://jhu/resnext50_32x4d_gn_ws')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/gn+ws/metafile.yml
================================================
Collections:
  - Name: Weight Standardization
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - Group Normalization
        - Weight Standardization
    Paper:
      URL: https://arxiv.org/abs/1903.10520
      Title: 'Weight Standardization'
    README: configs/gn+ws/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/configs/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py
      Version: v2.0.0

Models:
  - Name: faster_rcnn_r50_fpn_gn_ws-all_1x_coco
    In Collection: Weight Standardization
    Config: configs/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py
    Metadata:
      Training Memory (GB): 5.9
      inference time (ms/im):
        - value: 85.47
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth
  - Name: faster_rcnn_r101_fpn_gn_ws-all_1x_coco
    In Collection: Weight Standardization
    Config: configs/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py
    Metadata:
      Training Memory (GB): 8.9
      inference time (ms/im):
        - value: 111.11
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco/faster_rcnn_r101_fpn_gn_ws-all_1x_coco_20200205-a93b0d75.pth
  - Name: faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco
    In Collection: Weight Standardization
    Config: configs/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py
    Metadata:
      Training Memory (GB): 7.0
      inference time (ms/im):
        - value: 97.09
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco_20200203-839c5d9d.pth
  - Name: faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco
    In Collection: Weight Standardization
    Config: configs/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py
    Metadata:
      Training Memory (GB): 10.8
      inference time (ms/im):
        - value: 131.58
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco_20200212-27da1bc2.pth
  - Name: mask_rcnn_r50_fpn_gn_ws-all_2x_coco
    In Collection: Weight Standardization
    Config: configs/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py
    Metadata:
      Training Memory (GB): 7.3
      inference time (ms/im):
        - value: 95.24
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.6
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco/mask_rcnn_r50_fpn_gn_ws-all_2x_coco_20200226-16acb762.pth
  - Name: mask_rcnn_r101_fpn_gn_ws-all_2x_coco
    In Collection: Weight Standardization
    Config: configs/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py
    Metadata:
      Training Memory (GB): 10.3
      inference time (ms/im):
        - value: 116.28
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco/mask_rcnn_r101_fpn_gn_ws-all_2x_coco_20200212-ea357cd9.pth
  - Name: mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco
    In Collection: Weight Standardization
    Config: configs/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py
    Metadata:
      Training Memory (GB): 8.4
      inference time (ms/im):
        - value: 107.53
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco_20200216-649fdb6f.pth
  - Name: mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco
    In Collection: Weight Standardization
    Config: configs/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py
    Metadata:
      Training Memory (GB): 12.2
      inference time (ms/im):
        - value: 140.85
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco_20200319-33fb95b5.pth
  - Name: mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco
    In Collection: Weight Standardization
    Config: configs/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py
    Metadata:
      Training Memory (GB): 7.3
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco_20200213-487d1283.pth
  - Name: mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco
    In Collection: Weight Standardization
    Config: configs/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py
    Metadata:
      Training Memory (GB): 10.3
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco_20200213-57b5a50f.pth
  - Name: mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco
    In Collection: Weight Standardization
    Config: configs/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py
    Metadata:
      Training Memory (GB): 8.4
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200226-969bcb2c.pth
  - Name: mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco
    In Collection: Weight Standardization
    Config: configs/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py
    Metadata:
      Training Memory (GB): 12.2
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.7
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200316-e6cd35ef.pth
================================================
FILE: DLTA_AI_app/mmdetection/configs/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco.py
================================================
_base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py
================================================
_base_ = ['grid_rcnn_r50_fpn_gn-head_2x_coco.py']
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
checkpoint_config = dict(interval=1)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=12)
================================================
FILE: DLTA_AI_app/mmdetection/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py
================================================
_base_ = [
    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
    type='GridRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    roi_head=dict(
        type='GridRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            with_reg=False,
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False),
        grid_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        grid_head=dict(
            type='GridHead',
            grid_points=9,
            num_convs=8,
            in_channels=256,
            point_feat_channels=64,
            norm_cfg=dict(type='GN', num_groups=36),
            loss_grid=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=15))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_radius=1,
            pos_weight=-1,
            max_num_grid=192,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.03,
            nms=dict(type='nms', iou_threshold=0.3),
            max_per_img=100)))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=3665,
    warmup_ratio=1.0 / 80,
    step=[17, 23])
runner = dict(type='EpochBasedRunner', max_epochs=25)

================================================
FILE: DLTA_AI_app/mmdetection/configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py
================================================
_base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=3665,
    warmup_ratio=1.0 / 80,
    step=[17, 23])
runner = dict(type='EpochBasedRunner', max_epochs=25)
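The Grid R-CNN schedule above is unusual: a linear warmup over 3665 iterations starting at 1/80 of the base LR, then 10x decays at epochs 17 and 23 of a 25-epoch run. Written out as a function, assuming mmcv's linear-warmup formula:

def lr_at(epoch, global_iter, base_lr=0.02):
    # Linear warmup: ramp from base_lr / 80 up to base_lr over 3665 iters.
    if global_iter < 3665:
        k = global_iter / 3665
        return base_lr * ((1.0 / 80) * (1 - k) + k)
    # Step decay: multiply by 0.1 at each milestone passed (step=[17, 23]).
    return base_lr * 0.1 ** sum(epoch >= s for s in (17, 23))

print(lr_at(0, 0))       # 0.00025
print(lr_at(10, 50000))  # 0.02
print(lr_at(24, 90000))  # 0.0002 after both decays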
================================================
FILE: DLTA_AI_app/mmdetection/configs/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py
================================================
_base_ = './grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/grid_rcnn/metafile.yml
================================================
Collections:
  - Name: Grid R-CNN
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RPN
        - Dilated Convolution
        - ResNet
        - RoIAlign
    Paper:
      URL: https://arxiv.org/abs/1906.05688
      Title: 'Grid R-CNN'
    README: configs/grid_rcnn/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/grid_rcnn.py#L6
      Version: v2.0.0

Models:
  - Name: grid_rcnn_r50_fpn_gn-head_2x_coco
    In Collection: Grid R-CNN
    Config: configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py
    Metadata:
      Training Memory (GB): 5.1
      inference time (ms/im):
        - value: 66.67
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth
  - Name: grid_rcnn_r101_fpn_gn-head_2x_coco
    In Collection: Grid R-CNN
    Config: configs/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco.py
    Metadata:
      Training Memory (GB): 7.0
      inference time (ms/im):
        - value: 79.37
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco/grid_rcnn_r101_fpn_gn-head_2x_coco_20200309-d6eca030.pth
  - Name: grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco
    In Collection: Grid R-CNN
    Config: configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py
    Metadata:
      Training Memory (GB): 8.3
      inference time (ms/im):
        - value: 92.59
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco_20200130-d8f0e3ff.pth
  - Name: grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco
    In Collection: Grid R-CNN
    Config: configs/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py
    Metadata:
      Training Memory (GB): 11.3
      inference time (ms/im):
        - value: 129.87
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco_20200204-ec76a754.pth
================================================
FILE: DLTA_AI_app/mmdetection/configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py
================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
# model settings
model = dict(
    roi_head=dict(
        bbox_roi_extractor=dict(
            type='GenericRoIExtractor',
            aggregation='sum',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32],
            pre_cfg=dict(
                type='ConvModule',
                in_channels=256,
                out_channels=256,
                kernel_size=5,
                padding=2,
                inplace=False,
            ),
            post_cfg=dict(
                type='GeneralizedAttention',
                in_channels=256,
                spatial_range=-1,
                num_heads=6,
                attention_type='0100',
                kv_stride=2))))

================================================
FILE: DLTA_AI_app/mmdetection/configs/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py
================================================
_base_ = '../grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
    roi_head=dict(
        bbox_roi_extractor=dict(
            type='GenericRoIExtractor',
            aggregation='sum',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32],
            pre_cfg=dict(
                type='ConvModule',
                in_channels=256,
                out_channels=256,
                kernel_size=5,
                padding=2,
                inplace=False,
            ),
            post_cfg=dict(
                type='GeneralizedAttention',
                in_channels=256,
                spatial_range=-1,
                num_heads=6,
                attention_type='0100',
                kv_stride=2)),
        grid_roi_extractor=dict(
            type='GenericRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32],
            pre_cfg=dict(
                type='ConvModule',
                in_channels=256,
                out_channels=256,
                kernel_size=5,
                padding=2,
                inplace=False,
            ),
            post_cfg=dict(
                type='GeneralizedAttention',
                in_channels=256,
                spatial_range=-1,
                num_heads=6,
                attention_type='0100',
                kv_stride=2))))

================================================
FILE: DLTA_AI_app/mmdetection/configs/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py
================================================
_base_ = '../gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py'
# model settings
model = dict(
    roi_head=dict(
        bbox_roi_extractor=dict(
            type='GenericRoIExtractor',
            aggregation='sum',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32],
            pre_cfg=dict(
                type='ConvModule',
                in_channels=256,
                out_channels=256,
                kernel_size=5,
                padding=2,
                inplace=False,
            ),
            post_cfg=dict(
                type='GeneralizedAttention',
                in_channels=256,
                spatial_range=-1,
                num_heads=6,
                attention_type='0100',
                kv_stride=2)),
        mask_roi_extractor=dict(
            type='GenericRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32],
            pre_cfg=dict(
                type='ConvModule',
                in_channels=256,
                out_channels=256,
                kernel_size=5,
                padding=2,
                inplace=False,
            ),
            post_cfg=dict(
                type='GeneralizedAttention',
                in_channels=256,
                spatial_range=-1,
                num_heads=6,
                attention_type='0100',
                kv_stride=2))))
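GenericRoIExtractor with aggregation='sum' pools each RoI from every FPN level and sums the results, unlike SingleRoIExtractor, which assigns each RoI to a single level by scale. A sketch of just the aggregation step using torchvision's roi_align (mmdet's version also wraps the pre_cfg/post_cfg conv and attention modules configured above):

import torch
from torchvision.ops import roi_align

def groie_sum(feats, strides, rois, out_size=7):
    # feats: FPN maps [(N, C, H_l, W_l)]; rois: (K, 5) as (batch_idx, x1, y1, x2, y2)
    acc = 0
    for feat, stride in zip(feats, strides):
        acc = acc + roi_align(feat, rois, out_size,
                              spatial_scale=1.0 / stride, sampling_ratio=2)
    return acc  # every RoI sees every pyramid level, summed

feats = [torch.randn(1, 256, 256 // s, 256 // s) for s in (4, 8, 16, 32)]
rois = torch.tensor([[0.0, 32.0, 32.0, 96.0, 96.0]])
pooled = groie_sum(feats, (4, 8, 16, 32), rois)  # (1, 256, 7, 7)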
================================================
FILE: DLTA_AI_app/mmdetection/configs/groie/mask_rcnn_r50_fpn_groie_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
# model settings
model = dict(
    roi_head=dict(
        bbox_roi_extractor=dict(
            type='GenericRoIExtractor',
            aggregation='sum',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32],
            pre_cfg=dict(
                type='ConvModule',
                in_channels=256,
                out_channels=256,
                kernel_size=5,
                padding=2,
                inplace=False,
            ),
            post_cfg=dict(
                type='GeneralizedAttention',
                in_channels=256,
                spatial_range=-1,
                num_heads=6,
                attention_type='0100',
                kv_stride=2)),
        mask_roi_extractor=dict(
            type='GenericRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32],
            pre_cfg=dict(
                type='ConvModule',
                in_channels=256,
                out_channels=256,
                kernel_size=5,
                padding=2,
                inplace=False,
            ),
            post_cfg=dict(
                type='GeneralizedAttention',
                in_channels=256,
                spatial_range=-1,
                num_heads=6,
                attention_type='0100',
                kv_stride=2))))

================================================
FILE: DLTA_AI_app/mmdetection/configs/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py
================================================
_base_ = '../gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py'
# model settings
model = dict(
    roi_head=dict(
        bbox_roi_extractor=dict(
            type='GenericRoIExtractor',
            aggregation='sum',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32],
            pre_cfg=dict(
                type='ConvModule',
                in_channels=256,
                out_channels=256,
                kernel_size=5,
                padding=2,
                inplace=False,
            ),
            post_cfg=dict(
                type='GeneralizedAttention',
                in_channels=256,
                spatial_range=-1,
                num_heads=6,
                attention_type='0100',
                kv_stride=2)),
        mask_roi_extractor=dict(
            type='GenericRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32],
            pre_cfg=dict(
                type='ConvModule',
                in_channels=256,
                out_channels=256,
                kernel_size=5,
                padding=2,
                inplace=False,
            ),
            post_cfg=dict(
                type='GeneralizedAttention',
                in_channels=256,
                spatial_range=-1,
                num_heads=6,
                attention_type='0100',
                kv_stride=2))))
================================================
FILE: DLTA_AI_app/mmdetection/configs/groie/metafile.yml
================================================
Collections:
  - Name: GRoIE
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - Generic RoI Extractor
        - FPN
        - RPN
        - ResNet
        - RoIAlign
    Paper:
      URL: https://arxiv.org/abs/2004.13665
      Title: 'A novel Region of Interest Extraction Layer for Instance Segmentation'
    README: configs/groie/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/roi_heads/roi_extractors/groie.py#L15
      Version: v2.1.0

Models:
  - Name: faster_rcnn_r50_fpn_groie_1x_coco
    In Collection: GRoIE
    Config: configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth
  - Name: grid_rcnn_r50_fpn_gn-head_groie_1x_coco
    In Collection: GRoIE
    Config: configs/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco/grid_rcnn_r50_fpn_gn-head_groie_1x_coco_20200605_202059-4b75d86f.pth
  - Name: mask_rcnn_r50_fpn_groie_1x_coco
    In Collection: GRoIE
    Config: configs/groie/mask_rcnn_r50_fpn_groie_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_groie_1x_coco/mask_rcnn_r50_fpn_groie_1x_coco_20200604_211715-50d90c74.pth
  - Name: mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco
    In Collection: GRoIE
    Config: configs/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200604_211715-42eb79e1.pth
  - Name: mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco
    In Collection: GRoIE
    Config: configs/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.6
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507-8daae01c.pth

================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x_coco.py
================================================
_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    roi_head=dict(
        bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
    # model training and testing settings
    train_cfg=dict(
        rcnn=dict(
            assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
            sampler=dict(num=256))),
    test_cfg=dict(rcnn=dict(score_thr=1e-3)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadProposals', num_max_proposals=300),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadProposals', num_max_proposals=None),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img', 'proposals']),
        ])
]
data = dict(
    train=dict(
        proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_train2017.pkl',
        pipeline=train_pipeline),
    val=dict(
        proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
        pipeline=test_pipeline),
    test=dict(
        proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
        pipeline=test_pipeline))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
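ga_fast_r50_caffe trains a Fast R-CNN head on proposals precomputed by a GA-RPN run, loaded through LoadProposals from the .pkl files referenced above. In mmdetection 2.x such files conventionally hold one (num_proposals, 5) array of (x1, y1, x2, y2, score) per image, in dataset order; a quick inspection sketch (the format is an assumption, verify against your own file):

import pickle

with open('data/coco/proposals/ga_rpn_r50_fpn_1x_train2017.pkl', 'rb') as f:
    proposals = pickle.load(f)
print(len(proposals), proposals[0].shape)  # num images, e.g. (300, 5)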
================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py
================================================
_base_ = './ga_faster_r50_caffe_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py
================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
    rpn_head=dict(
        _delete_=True,
        type='GARPNHead',
        in_channels=256,
        feat_channels=256,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=8,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[8],
            strides=[4, 8, 16, 32, 64]),
        anchor_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.07, 0.07, 0.14, 0.14]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.07, 0.07, 0.11, 0.11]),
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
    roi_head=dict(
        bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            ga_assigner=dict(
                type='ApproxMaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            ga_sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            center_ratio=0.2,
            ignore_ratio=0.5),
        rpn_proposal=dict(nms_post=1000, max_per_img=300),
        rcnn=dict(
            assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
            sampler=dict(type='RandomSampler', num=256))),
    test_cfg=dict(
        rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3)))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_faster_r50_fpn_1x_coco.py
================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    rpn_head=dict(
        _delete_=True,
        type='GARPNHead',
        in_channels=256,
        feat_channels=256,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=8,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[8],
            strides=[4, 8, 16, 32, 64]),
        anchor_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.07, 0.07, 0.14, 0.14]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.07, 0.07, 0.11, 0.11]),
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
    roi_head=dict(
        bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            ga_assigner=dict(
                type='ApproxMaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            ga_sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            center_ratio=0.2,
            ignore_ratio=0.5),
        rpn_proposal=dict(nms_post=1000, max_per_img=300),
        rcnn=dict(
            assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
            sampler=dict(type='RandomSampler', num=256))),
    test_cfg=dict(
        rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3)))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))

================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py
================================================
_base_ = './ga_faster_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py
================================================
_base_ = './ga_faster_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py
================================================
_base_ = './ga_retinanet_r50_caffe_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_mstrain_2x.py
================================================
_base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
    type='RetinaNet',
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs=True,
        num_outs=5),
    bbox_head=dict(
        type='GARetinaHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[4],
            strides=[8, 16, 32, 64, 128]),
        anchor_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
    ga_assigner=dict(
        type='ApproxMaxIoUAssigner',
        pos_iou_thr=0.5,
        neg_iou_thr=0.4,
        min_pos_iou=0.4,
        ignore_iof_thr=-1),
    ga_sampler=dict(
        type='RandomSampler',
        num=256,
        pos_fraction=0.5,
        neg_pos_ub=-1,
        add_gt_as_proposals=False),
    assigner=dict(
        type='MaxIoUAssigner',
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        min_pos_iou=0.0,
        ignore_iof_thr=-1),
    allowed_border=-1,
    pos_weight=-1,
    center_ratio=0.2,
    ignore_ratio=0.5,
    debug=False)
test_cfg = dict(
    nms_pre=1000,
    min_bbox_size=0,
    score_thr=0.05,
    nms=dict(type='nms', iou_threshold=0.5),
    max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 480), (1333, 960)],
        keep_ratio=True,
        multiscale_mode='range'),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[16, 22])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=24)
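
(Note: with multiscale_mode='range', the Resize step above draws one target
scale per image between (1333, 480) and (1333, 960): the long edge is capped
at 1333 and the short edge is sampled uniformly. A rough illustration of the
sampling rule, not the actual mmdet implementation:)

    import random

    def sample_train_scale(short_lo=480, short_hi=960, long_edge=1333):
        # One scale per training image: short edge uniform in [480, 960].
        return (long_edge, random.randint(short_lo, short_hi))

    print(sample_train_scale())  # e.g. (1333, 712)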

================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py
================================================
_base_ = '../retinanet/retinanet_r50_caffe_fpn_1x_coco.py'
model = dict(
    bbox_head=dict(
        _delete_=True,
        type='GARetinaHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[4],
            strides=[8, 16, 32, 64, 128]),
        anchor_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        ga_assigner=dict(
            type='ApproxMaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0.4,
            ignore_iof_thr=-1),
        ga_sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0),
        center_ratio=0.2,
        ignore_ratio=0.5))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))


================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py
================================================
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
model = dict(
    bbox_head=dict(
        _delete_=True,
        type='GARetinaHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[4],
            strides=[8, 16, 32, 64, 128]),
        anchor_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        ga_assigner=dict(
            type='ApproxMaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0.4,
            ignore_iof_thr=-1),
        ga_sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0),
        center_ratio=0.2,
        ignore_ratio=0.5))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))


================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py
================================================
_base_ = './ga_retinanet_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))


================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py
================================================
_base_ = './ga_retinanet_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
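
(Note: several of these configs mark a dict with _delete_=True. Under mmcv
config inheritance this replaces the corresponding base dict wholesale
instead of merging into it. The merge rule is roughly the sketch below;
mmcv's real implementation lives in mmcv.utils.Config and handles more
cases:)

    def merge(child, base):
        # Sketch of the mmcv config merge rule only.
        if child.pop('_delete_', False):
            return child                # replace the base dict entirely
        merged = dict(base)
        for key, val in child.items():
            if isinstance(val, dict) and isinstance(merged.get(key), dict):
                merged[key] = merge(dict(val), merged[key])  # recurse
            else:
                merged[key] = val       # override scalar or add new key
        return merged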

================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py
================================================
_base_ = './ga_rpn_r50_caffe_fpn_1x_coco.py'
# model settings
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))


================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco.py
================================================
_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py'
model = dict(
    rpn_head=dict(
        _delete_=True,
        type='GARPNHead',
        in_channels=256,
        feat_channels=256,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=8,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[8],
            strides=[4, 8, 16, 32, 64]),
        anchor_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.07, 0.07, 0.14, 0.14]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.07, 0.07, 0.11, 0.11]),
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            ga_assigner=dict(
                type='ApproxMaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            ga_sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            center_ratio=0.2,
            ignore_ratio=0.5)),
    test_cfg=dict(rpn=dict(nms_post=1000)))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))


================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_rpn_r50_fpn_1x_coco.py
================================================
_base_ = '../rpn/rpn_r50_fpn_1x_coco.py'
model = dict(
    rpn_head=dict(
        _delete_=True,
        type='GARPNHead',
        in_channels=256,
        feat_channels=256,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=8,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[8],
            strides=[4, 8, 16, 32, 64]),
        anchor_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.07, 0.07, 0.14, 0.14]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.07, 0.07, 0.11, 0.11]),
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            ga_assigner=dict(
                type='ApproxMaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            ga_sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            center_ratio=0.2,
            ignore_ratio=0.5)),
    test_cfg=dict(rpn=dict(nms_post=1000)))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
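
(Note: every GA config re-declares optimizer_config with gradient clipping,
which mmcv's optimizer hook applies before each step. In plain PyTorch terms
grad_clip=dict(max_norm=35, norm_type=2) amounts to:)

    import torch

    def clip_gradients(model, max_norm=35, norm_type=2):
        # Rescale gradients so their global L2 norm is at most max_norm.
        return torch.nn.utils.clip_grad_norm_(
            model.parameters(), max_norm=max_norm, norm_type=norm_type)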

================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py
================================================
_base_ = './ga_rpn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))


================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py
================================================
_base_ = './ga_rpn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))


================================================
FILE: DLTA_AI_app/mmdetection/configs/guided_anchoring/metafile.yml
================================================
Collections:
  - Name: Guided Anchoring
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - FPN
        - Guided Anchoring
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/1901.03278
      Title: 'Region Proposal by Guided Anchoring'
    README: configs/guided_anchoring/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/dense_heads/ga_retina_head.py#L10
      Version: v2.0.0

Models:
  - Name: ga_rpn_r50_caffe_fpn_1x_coco
    In Collection: Guided Anchoring
    Config: configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 5.3
      inference time (ms/im):
        - value: 63.29
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Region Proposal
        Dataset: COCO
        Metrics:
          AR@1000: 68.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco/ga_rpn_r50_caffe_fpn_1x_coco_20200531-899008a6.pth

  - Name: ga_rpn_r101_caffe_fpn_1x_coco
    In Collection: Guided Anchoring
    Config: configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 7.3
      inference time (ms/im):
        - value: 76.92
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Region Proposal
        Dataset: COCO
        Metrics:
          AR@1000: 69.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco/ga_rpn_r101_caffe_fpn_1x_coco_20200531-ca9ba8fb.pth

  - Name: ga_rpn_x101_32x4d_fpn_1x_coco
    In Collection: Guided Anchoring
    Config: configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 8.5
      inference time (ms/im):
        - value: 100
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Region Proposal
        Dataset: COCO
        Metrics:
          AR@1000: 70.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco/ga_rpn_x101_32x4d_fpn_1x_coco_20200220-c28d1b18.pth

  - Name: ga_rpn_x101_64x4d_fpn_1x_coco
    In Collection: Guided Anchoring
    Config: configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 7.1
      inference time (ms/im):
        - value: 133.33
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Region Proposal
        Dataset: COCO
        Metrics:
          AR@1000: 70.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco/ga_rpn_x101_64x4d_fpn_1x_coco_20200225-3c6e1aa2.pth

  - Name: ga_faster_r50_caffe_fpn_1x_coco
    In Collection: Guided Anchoring
    Config: configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 5.5
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco/ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth

  - Name: ga_faster_r101_caffe_fpn_1x_coco
    In Collection: Guided Anchoring
    Config: configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 7.5
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco/ga_faster_r101_caffe_fpn_1x_coco_bbox_mAP-0.415_20200505_115528-fb82e499.pth

  - Name: ga_faster_x101_32x4d_fpn_1x_coco
    In Collection: Guided Anchoring
    Config: configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 8.7
      inference time (ms/im):
        - value: 103.09
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco/ga_faster_x101_32x4d_fpn_1x_coco_20200215-1ded9da3.pth

  - Name: ga_faster_x101_64x4d_fpn_1x_coco
    In Collection: Guided Anchoring
    Config: configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 11.8
      inference time (ms/im):
        - value: 136.99
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco/ga_faster_x101_64x4d_fpn_1x_coco_20200215-0fa7bde7.pth

  - Name: ga_retinanet_r50_caffe_fpn_1x_coco
    In Collection: Guided Anchoring
    Config: configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 3.5
      inference time (ms/im):
        - value: 59.52
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 36.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth

  - Name: ga_retinanet_r101_caffe_fpn_1x_coco
    In Collection: Guided Anchoring
    Config: configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 5.5
      inference time (ms/im):
        - value: 77.52
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco/ga_retinanet_r101_caffe_fpn_1x_coco_20200531-6266453c.pth

  - Name: ga_retinanet_x101_32x4d_fpn_1x_coco
    In Collection: Guided Anchoring
    Config: configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 6.9
      inference time (ms/im):
        - value: 94.34
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco/ga_retinanet_x101_32x4d_fpn_1x_coco_20200219-40c56caa.pth

  - Name: ga_retinanet_x101_64x4d_fpn_1x_coco
    In Collection: Guided Anchoring
    Config: configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 9.9
      inference time (ms/im):
        - value: 129.87
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco/ga_retinanet_x101_64x4d_fpn_1x_coco_20200226-ef9f7f1f.pth


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py
================================================
_base_ = './cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py'
# model settings
model = dict(
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(18, 36)),
            stage3=dict(num_channels=(18, 36, 72)),
            stage4=dict(num_channels=(18, 36, 72, 144))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')),
    neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py
================================================
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        _delete_=True,
        type='HRNet',
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                num_channels=(64, )),
            stage2=dict(
                num_modules=1,
                num_branches=2,
                block='BASIC',
                num_blocks=(4, 4),
                num_channels=(32, 64)),
            stage3=dict(
                num_modules=4,
                num_branches=3,
                block='BASIC',
                num_blocks=(4, 4, 4),
                num_channels=(32, 64, 128)),
            stage4=dict(
                num_modules=3,
                num_branches=4,
                block='BASIC',
                num_blocks=(4, 4, 4, 4),
                num_channels=(32, 64, 128, 256))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')),
    neck=dict(
        _delete_=True,
        type='HRFPN',
        in_channels=[32, 64, 128, 256],
        out_channels=256))
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py
================================================
_base_ = './cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py'
# model settings
model = dict(
    backbone=dict(
        type='HRNet',
        extra=dict(
            stage2=dict(num_channels=(40, 80)),
            stage3=dict(num_channels=(40, 80, 160)),
            stage4=dict(num_channels=(40, 80, 160, 320))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')),
    neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco.py
================================================
_base_ = './cascade_rcnn_hrnetv2p_w32_20e_coco.py'
# model settings
model = dict(
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(18, 36)),
            stage3=dict(num_channels=(18, 36, 72)),
            stage4=dict(num_channels=(18, 36, 72, 144))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')),
    neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
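
(Note: the w18/w32/w40 HRNet variants differ only in the per-stage channel
widths, which always follow the pattern (C, 2C, 4C, 8C); the HRFPN
in_channels list must match the stage4 channels. A hypothetical helper,
names illustrative and not repository code:)

    def hrnet_stage_channels(width):
        # e.g. width=18 -> stage4 (18, 36, 72, 144) == HRFPN in_channels
        return {
            'stage2': (width, width * 2),
            'stage3': (width, width * 2, width * 4),
            'stage4': (width, width * 2, width * 4, width * 8),
        }

    assert hrnet_stage_channels(18)['stage4'] == (18, 36, 72, 144)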

================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py
================================================
_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        _delete_=True,
        type='HRNet',
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                num_channels=(64, )),
            stage2=dict(
                num_modules=1,
                num_branches=2,
                block='BASIC',
                num_blocks=(4, 4),
                num_channels=(32, 64)),
            stage3=dict(
                num_modules=4,
                num_branches=3,
                block='BASIC',
                num_blocks=(4, 4, 4),
                num_channels=(32, 64, 128)),
            stage4=dict(
                num_modules=3,
                num_branches=4,
                block='BASIC',
                num_blocks=(4, 4, 4, 4),
                num_channels=(32, 64, 128, 256))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')),
    neck=dict(
        _delete_=True,
        type='HRFPN',
        in_channels=[32, 64, 128, 256],
        out_channels=256))
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco.py
================================================
_base_ = './cascade_rcnn_hrnetv2p_w32_20e_coco.py'
# model settings
model = dict(
    backbone=dict(
        type='HRNet',
        extra=dict(
            stage2=dict(num_channels=(40, 80)),
            stage3=dict(num_channels=(40, 80, 160)),
            stage4=dict(num_channels=(40, 80, 160, 320))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')),
    neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py
================================================
_base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py'
# model settings
model = dict(
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(18, 36)),
            stage3=dict(num_channels=(18, 36, 72)),
            stage4=dict(num_channels=(18, 36, 72, 144))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')),
    neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.py
================================================
_base_ = './faster_rcnn_hrnetv2p_w18_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py
================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        _delete_=True,
        type='HRNet',
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                num_channels=(64, )),
            stage2=dict(
                num_modules=1,
                num_branches=2,
                block='BASIC',
                num_blocks=(4, 4),
                num_channels=(32, 64)),
            stage3=dict(
                num_modules=4,
                num_branches=3,
                block='BASIC',
                num_blocks=(4, 4, 4),
                num_channels=(32, 64, 128)),
            stage4=dict(
                num_modules=3,
                num_branches=4,
                block='BASIC',
                num_blocks=(4, 4, 4, 4),
                num_channels=(32, 64, 128, 256))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')),
    neck=dict(
        _delete_=True,
        type='HRFPN',
        in_channels=[32, 64, 128, 256],
        out_channels=256))


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py
================================================
_base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
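
(Note: the _2x files change only the schedule: 24 epochs with LR drops at
epochs 16 and 22. With the step policy the learning rate is the base LR
scaled by 0.1 for each passed step; illustrative sketch, base_lr taken from
the inherited 1x schedule:)

    def step_lr(epoch, base_lr=0.02, steps=(16, 22), gamma=0.1):
        # lr_config = dict(step=[16, 22]) with runner max_epochs=24.
        return base_lr * gamma ** sum(epoch >= s for s in steps)

    print(step_lr(10))  # 0.02
    print(step_lr(18))  # 0.002
    print(step_lr(23))  # 0.0002 (up to float rounding)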

================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco.py
================================================
_base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py'
model = dict(
    backbone=dict(
        type='HRNet',
        extra=dict(
            stage2=dict(num_channels=(40, 80)),
            stage3=dict(num_channels=(40, 80, 160)),
            stage4=dict(num_channels=(40, 80, 160, 320))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')),
    neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py
================================================
_base_ = './faster_rcnn_hrnetv2p_w40_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py
================================================
_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
model = dict(
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(18, 36)),
            stage3=dict(num_channels=(18, 36, 72)),
            stage4=dict(num_channels=(18, 36, 72, 144))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')),
    neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py
================================================
_base_ = './fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py
================================================
_base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py'
model = dict(
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(18, 36)),
            stage3=dict(num_channels=(18, 36, 72)),
            stage4=dict(num_channels=(18, 36, 72, 144))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')),
    neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py
================================================
_base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py'
model = dict(
    backbone=dict(
        _delete_=True,
        type='HRNet',
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                num_channels=(64, )),
            stage2=dict(
                num_modules=1,
                num_branches=2,
                block='BASIC',
                num_blocks=(4, 4),
                num_channels=(32, 64)),
            stage3=dict(
                num_modules=4,
                num_branches=3,
                block='BASIC',
                num_blocks=(4, 4, 4),
                num_channels=(32, 64, 128)),
            stage4=dict(
                num_modules=3,
                num_branches=4,
                block='BASIC',
                num_blocks=(4, 4, 4, 4),
                num_channels=(32, 64, 128, 256))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')),
    neck=dict(
        _delete_=True,
        type='HRFPN',
        in_channels=[32, 64, 128, 256],
        out_channels=256,
        stride=2,
        num_outs=5))
img_norm_cfg = dict(
    mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py
================================================
_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py
================================================
_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
img_norm_cfg = dict(
    mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py
================================================
_base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py'
model = dict(
    backbone=dict(
        type='HRNet',
        extra=dict(
            stage2=dict(num_channels=(40, 80)),
            stage3=dict(num_channels=(40, 80, 160)),
            stage4=dict(num_channels=(40, 80, 160, 320))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')),
    neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/htc_hrnetv2p_w18_20e_coco.py
================================================
_base_ = './htc_hrnetv2p_w32_20e_coco.py'
model = dict(
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(18, 36)),
            stage3=dict(num_channels=(18, 36, 72)),
            stage4=dict(num_channels=(18, 36, 72, 144))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')),
    neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
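
(Note: the FCOS pipelines above keep images in BGR (to_rgb=False) and use
caffe-style statistics, so the Normalize step is a per-channel affine
transform. Numerically, assuming a float HxWx3 BGR array:)

    import numpy as np

    mean = np.array([103.53, 116.28, 123.675])  # BGR channel means
    std = np.array([57.375, 57.12, 58.395])

    def normalize(img_bgr):
        # What dict(type='Normalize', **img_norm_cfg) computes per pixel.
        return (img_bgr.astype(np.float32) - mean) / std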

================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/htc_hrnetv2p_w32_20e_coco.py
================================================
_base_ = '../htc/htc_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        _delete_=True,
        type='HRNet',
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                num_channels=(64, )),
            stage2=dict(
                num_modules=1,
                num_branches=2,
                block='BASIC',
                num_blocks=(4, 4),
                num_channels=(32, 64)),
            stage3=dict(
                num_modules=4,
                num_branches=3,
                block='BASIC',
                num_blocks=(4, 4, 4),
                num_channels=(32, 64, 128)),
            stage4=dict(
                num_modules=3,
                num_branches=4,
                block='BASIC',
                num_blocks=(4, 4, 4, 4),
                num_channels=(32, 64, 128, 256))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')),
    neck=dict(
        _delete_=True,
        type='HRFPN',
        in_channels=[32, 64, 128, 256],
        out_channels=256))


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/htc_hrnetv2p_w40_20e_coco.py
================================================
_base_ = './htc_hrnetv2p_w32_20e_coco.py'
model = dict(
    backbone=dict(
        type='HRNet',
        extra=dict(
            stage2=dict(num_channels=(40, 80)),
            stage3=dict(num_channels=(40, 80, 160)),
            stage4=dict(num_channels=(40, 80, 160, 320))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')),
    neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/htc_hrnetv2p_w40_28e_coco.py
================================================
_base_ = './htc_hrnetv2p_w40_20e_coco.py'
# learning policy
lr_config = dict(step=[24, 27])
runner = dict(type='EpochBasedRunner', max_epochs=28)


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py
================================================
_base_ = '../htc/htc_x101_64x4d_fpn_16x1_20e_coco.py'
# learning policy
lr_config = dict(step=[24, 27])
runner = dict(type='EpochBasedRunner', max_epochs=28)


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py
================================================
_base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py'
model = dict(
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(18, 36)),
            stage3=dict(num_channels=(18, 36, 72)),
            stage4=dict(num_channels=(18, 36, 72, 144))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')),
    neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py
================================================
_base_ = './mask_rcnn_hrnetv2p_w18_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
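
(Note: any of these files can be materialized into a model in the usual
mmdet 2.x way; a minimal sketch, assuming an mmdetection checkout on the
Python path:)

    from mmcv import Config
    from mmdet.models import build_detector

    cfg = Config.fromfile('configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py')
    # train_cfg/test_cfg are embedded inside cfg.model in this mmdet version.
    model = build_detector(cfg.model)
    print(type(model).__name__)  # MaskRCNN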

================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        _delete_=True,
        type='HRNet',
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                num_channels=(64, )),
            stage2=dict(
                num_modules=1,
                num_branches=2,
                block='BASIC',
                num_blocks=(4, 4),
                num_channels=(32, 64)),
            stage3=dict(
                num_modules=4,
                num_branches=3,
                block='BASIC',
                num_blocks=(4, 4, 4),
                num_channels=(32, 64, 128)),
            stage4=dict(
                num_modules=3,
                num_branches=4,
                block='BASIC',
                num_blocks=(4, 4, 4, 4),
                num_channels=(32, 64, 128, 256))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')),
    neck=dict(
        _delete_=True,
        type='HRFPN',
        in_channels=[32, 64, 128, 256],
        out_channels=256))


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py
================================================
_base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco.py
================================================
_base_ = './mask_rcnn_hrnetv2p_w18_1x_coco.py'
model = dict(
    backbone=dict(
        type='HRNet',
        extra=dict(
            stage2=dict(num_channels=(40, 80)),
            stage3=dict(num_channels=(40, 80, 160)),
            stage4=dict(num_channels=(40, 80, 160, 320))),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')),
    neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py
================================================
_base_ = './mask_rcnn_hrnetv2p_w40_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)


================================================
FILE: DLTA_AI_app/mmdetection/configs/hrnet/metafile.yml
================================================
Models:
  - Name: faster_rcnn_hrnetv2p_w18_1x_coco
    In Collection: Faster R-CNN
    Config: configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py
    Metadata:
      Training Memory (GB): 6.6
      inference time (ms/im):
        - value: 74.63
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 36.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: faster_rcnn_hrnetv2p_w18_2x_coco
    In Collection: Faster R-CNN
    Config: configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.py
    Metadata:
      Training Memory (GB): 6.6
      inference time (ms/im):
        - value: 74.63
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco/faster_rcnn_hrnetv2p_w18_2x_coco_20200702_085731-a4ec0611.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: faster_rcnn_hrnetv2p_w32_1x_coco
    In Collection: Faster R-CNN
    Config: configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py
    Metadata:
      Training Memory (GB): 9.0
      inference time (ms/im):
        - value: 80.65
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco/faster_rcnn_hrnetv2p_w32_1x_coco_20200130-6e286425.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: faster_rcnn_hrnetv2p_w32_2x_coco
    In Collection: Faster R-CNN
    Config: configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py
    Metadata:
      Training Memory (GB): 9.0
      inference time (ms/im):
        - value: 80.65
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco/faster_rcnn_hrnetv2p_w32_2x_coco_20200529_015927-976a9c15.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: faster_rcnn_hrnetv2p_w40_1x_coco
    In Collection: Faster R-CNN
    Config: configs/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco.py
    Metadata:
      Training Memory (GB): 10.4
      inference time (ms/im):
        - value: 95.24
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco/faster_rcnn_hrnetv2p_w40_1x_coco_20200210-95c1f5ce.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: faster_rcnn_hrnetv2p_w40_2x_coco
    In Collection: Faster R-CNN
    Config: configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py
    Metadata:
      Training Memory (GB): 10.4
      inference time (ms/im):
        - value: 95.24
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco/faster_rcnn_hrnetv2p_w40_2x_coco_20200512_161033-0f236ef4.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: mask_rcnn_hrnetv2p_w18_1x_coco
    In Collection: Mask R-CNN
    Config: configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py
    Metadata:
      Training Memory (GB): 7.0
      inference time (ms/im):
        - value: 85.47
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.7
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 34.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco/mask_rcnn_hrnetv2p_w18_1x_coco_20200205-1c3d78ed.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: mask_rcnn_hrnetv2p_w18_2x_coco
    In Collection: Mask R-CNN
    Config: configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py
    Metadata:
      Training Memory (GB): 7.0
      inference time (ms/im):
        - value: 85.47
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco/mask_rcnn_hrnetv2p_w18_2x_coco_20200212-b3c825b1.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: mask_rcnn_hrnetv2p_w32_1x_coco
    In Collection: Mask R-CNN
    Config: configs/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco.py
    Metadata:
      Training Memory (GB): 9.4
      inference time (ms/im):
        - value: 88.5
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco/mask_rcnn_hrnetv2p_w32_1x_coco_20200207-b29f616e.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: mask_rcnn_hrnetv2p_w32_2x_coco
    In Collection: Mask R-CNN
    Config: configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py
    Metadata:
      Training Memory (GB): 9.4
      inference time (ms/im):
        - value: 88.5
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.5
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco/mask_rcnn_hrnetv2p_w32_2x_coco_20200213-45b75b4d.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: mask_rcnn_hrnetv2p_w40_1x_coco
    In Collection: Mask R-CNN
    Config: configs/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco.py
    Metadata:
      Training Memory (GB): 10.9
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco/mask_rcnn_hrnetv2p_w40_1x_coco_20200511_015646-66738b35.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: mask_rcnn_hrnetv2p_w40_2x_coco
    In Collection: Mask R-CNN
    Config: configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py
    Metadata:
      Training Memory (GB): 10.9
      Epochs: 24
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco/mask_rcnn_hrnetv2p_w40_2x_coco_20200512_163732-aed5e4ab.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: cascade_rcnn_hrnetv2p_w18_20e_coco
    In Collection: Cascade R-CNN
    Config: configs/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco.py
    Metadata:
      Training Memory (GB): 7.0
      inference time (ms/im):
        - value: 90.91
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 20
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco/cascade_rcnn_hrnetv2p_w18_20e_coco_20200210-434be9d7.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: cascade_rcnn_hrnetv2p_w32_20e_coco
    In Collection: Cascade R-CNN
    Config: configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py
    Metadata:
      Training Memory (GB): 9.4
      inference time (ms/im):
        - value: 90.91
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 20
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco/cascade_rcnn_hrnetv2p_w32_20e_coco_20200208-928455a4.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: cascade_rcnn_hrnetv2p_w40_20e_coco
    In Collection: Cascade R-CNN
    Config: configs/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco.py
    Metadata:
      Training Memory (GB): 10.8
      Epochs: 20
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco/cascade_rcnn_hrnetv2p_w40_20e_coco_20200512_161112-75e47b04.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: cascade_mask_rcnn_hrnetv2p_w18_20e_coco
    In Collection: Cascade R-CNN
    Config: configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py
    Metadata:
      Training Memory (GB): 8.5
      inference time (ms/im):
        - value: 117.65
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 20
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.6
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco/cascade_mask_rcnn_hrnetv2p_w18_20e_coco_20200210-b543cd2b.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: cascade_mask_rcnn_hrnetv2p_w32_20e_coco
    In Collection: Cascade R-CNN
    Config: configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py
    Metadata:
      inference time (ms/im):
        - value: 120.48
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 20
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.3
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco/cascade_mask_rcnn_hrnetv2p_w32_20e_coco_20200512_154043-39d9cf7b.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: cascade_mask_rcnn_hrnetv2p_w40_20e_coco
    In Collection: Cascade R-CNN
    Config: configs/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py
    Metadata:
      Training Memory (GB): 12.5
      Epochs: 20
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 45.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco/cascade_mask_rcnn_hrnetv2p_w40_20e_coco_20200527_204922-969c4610.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: htc_hrnetv2p_w18_20e_coco
    In Collection: HTC
    Config: configs/hrnet/htc_hrnetv2p_w18_20e_coco.py
    Metadata:
      Training Memory (GB): 10.8
      inference time (ms/im):
        - value: 212.77
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 20
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w18_20e_coco/htc_hrnetv2p_w18_20e_coco_20200210-b266988c.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: htc_hrnetv2p_w32_20e_coco
    In Collection: HTC
    Config: configs/hrnet/htc_hrnetv2p_w32_20e_coco.py
    Metadata:
      Training Memory (GB): 13.1
      inference time (ms/im):
        - value: 204.08
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 20
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 45.4
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w32_20e_coco/htc_hrnetv2p_w32_20e_coco_20200207-7639fa12.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: htc_hrnetv2p_w40_20e_coco
    In Collection: HTC
    Config: configs/hrnet/htc_hrnetv2p_w40_20e_coco.py
    Metadata:
      Training Memory (GB): 14.6
      Epochs: 20
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.4
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 40.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w40_20e_coco/htc_hrnetv2p_w40_20e_coco_20200529_183411-417c4d5b.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: fcos_hrnetv2p_w18_gn-head_4x4_1x_coco
    In Collection: FCOS
    Config: configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py
    Metadata:
      Training Resources: 4x V100 GPUs
      Batch Size: 16
      Training Memory (GB): 13.0
      inference time (ms/im):
        - value: 77.52
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 35.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco_20201212_100710-4ad151de.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: fcos_hrnetv2p_w18_gn-head_4x4_2x_coco
    In Collection: FCOS
    Config: configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py
    Metadata:
      Training Resources: 4x V100 GPUs
      Batch Size: 16
      Training Memory (GB): 13.0
      inference time (ms/im):
        - value: 77.52
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco_20201212_101110-5c575fa5.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: fcos_hrnetv2p_w32_gn-head_4x4_1x_coco
    In Collection: FCOS
    Config: configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py
    Metadata:
      Training Resources: 4x V100 GPUs
      Batch Size: 16
      Training Memory (GB): 17.5
      inference time (ms/im):
        - value: 77.52
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco_20201211_134730-cb8055c0.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: fcos_hrnetv2p_w32_gn-head_4x4_2x_coco
    In Collection: FCOS
    Config: configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py
    Metadata:
      Training Resources: 4x V100 GPUs
      Batch Size: 16
      Training Memory (GB): 17.5
      inference time (ms/im):
        - value: 77.52
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco_20201212_112133-77b6b9bb.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco
    In Collection: FCOS
    Config: configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py
    Metadata:
      Training Resources: 4x V100 GPUs
      Batch Size: 16
      Training Memory (GB): 13.0
      inference time (ms/im):
        - value: 77.52
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco_20201212_111651-441e9d9f.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco
    In Collection: FCOS
    Config: configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py
    Metadata:
      Training Resources: 4x V100 GPUs
      Batch Size: 16
      Training Memory (GB): 17.5
      inference time (ms/im):
        - value: 80.65
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco_20201212_090846-b6f2b49f.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0

  - Name: fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco
    In Collection: FCOS
    Config: configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py
    Metadata:
      Training Resources: 4x V100 GPUs
      Batch Size: 16
      Training Memory (GB): 20.3
      inference time (ms/im):
        - value: 92.59
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Architecture:
        - HRNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco_20201212_124752-f22d2ce5.pth
    Paper:
      URL: https://arxiv.org/abs/1904.04514
      Title: 'Deep High-Resolution Representation Learning for Visual Recognition'
    README: configs/hrnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195
      Version: v2.0.0
dict(type='EpochBasedRunner', max_epochs=20)
================================================
FILE: DLTA_AI_app/mmdetection/configs/htc/htc_r50_fpn_1x_coco.py
================================================
_base_ = './htc_without_semantic_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        semantic_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[8]),
        semantic_head=dict(
            type='FusedSemanticHead',
            num_ins=5,
            fusion_level=1,
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=183,
            loss_seg=dict(
                type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))))
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='SegRescale', scale_factor=1 / 8),
    dict(type='DefaultFormatBundle'),
    dict(
        type='Collect',
        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip', flip_ratio=0.5),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(
        seg_prefix=data_root + 'stuffthingmaps/train2017/',
        pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
================================================
FILE: DLTA_AI_app/mmdetection/configs/htc/htc_r50_fpn_20e_coco.py
================================================
_base_ = './htc_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
================================================
FILE: DLTA_AI_app/mmdetection/configs/htc/htc_without_semantic_r50_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
    type='HybridTaskCascade',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    roi_head=dict(
        type='HybridTaskCascadeRoIHead',
        interleaved=True,
        mask_info_flow=True,
        num_stages=3,
        stage_loss_weights=[1, 0.5, 0.25],
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
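        # Editor's aside (illustration only, not part of the upstream file):
        # the three `bbox_head` entries below form HTC's cascade schedule.
        # Each stage is trained on proposals matched at a higher IoU
        # threshold (0.5, 0.6, 0.7 in `train_cfg.rcnn` further down) and
        # regresses with progressively tighter `target_stds`, so later
        # stages make finer box corrections. A minimal sketch of that
        # pairing, runnable on its own:
        #
        #   stage_ious = [0.5, 0.6, 0.7]
        #   stage_stds = [[0.1, 0.1, 0.2, 0.2],
        #                 [0.05, 0.05, 0.1, 0.1],
        #                 [0.033, 0.033, 0.067, 0.067]]
        #   for iou, stds in zip(stage_ious, stage_stds):
        #       print(f'stage IoU >= {iou}: target_stds = {stds}')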
        bbox_head=[
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0))
        ],
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        mask_head=[
            dict(
                type='HTCMaskHead',
                with_conv_res=False,
                num_convs=4,
                in_channels=256,
                conv_out_channels=256,
                num_classes=80,
                loss_mask=dict(
                    type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
            dict(
                type='HTCMaskHead',
                num_convs=4,
                in_channels=256,
                conv_out_channels=256,
                num_classes=80,
                loss_mask=dict(
                    type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
            dict(
                type='HTCMaskHead',
                num_convs=4,
                in_channels=256,
                conv_out_channels=256,
                num_classes=80,
                loss_mask=dict(
                    type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
        ]),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=[
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.5,
                    min_pos_iou=0.5,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.6,
                    neg_iou_thr=0.6,
                    min_pos_iou=0.6,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.7,
                    min_pos_iou=0.7,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False)
        ]),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.001,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py ================================================ _base_ = './htc_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) data = dict(samples_per_gpu=1, workers_per_gpu=1) # learning policy lr_config = dict(step=[16, 19]) runner = dict(type='EpochBasedRunner', max_epochs=20) ================================================ FILE: DLTA_AI_app/mmdetection/configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py ================================================ _base_ = './htc_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) data = dict(samples_per_gpu=1, workers_per_gpu=1) # learning policy lr_config = dict(step=[16, 19]) runner = dict(type='EpochBasedRunner', max_epochs=20) ================================================ FILE: DLTA_AI_app/mmdetection/configs/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py ================================================ _base_ = './htc_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) # dataset settings img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True), dict( type='Resize', img_scale=[(1600, 400), (1600, 1400)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='SegRescale', scale_factor=1 / 8), dict(type='DefaultFormatBundle'), dict( type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), ] data = dict( samples_per_gpu=1, workers_per_gpu=1, train=dict(pipeline=train_pipeline)) # learning policy lr_config = dict(step=[16, 19]) runner = dict(type='EpochBasedRunner', max_epochs=20) ================================================ FILE: DLTA_AI_app/mmdetection/configs/htc/metafile.yml ================================================ Collections: - Name: HTC Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - FPN - HTC - RPN - ResNet - ResNeXt - RoIAlign Paper: URL: https://arxiv.org/abs/1901.07518 Title: 
'Hybrid Task Cascade for Instance Segmentation' README: configs/htc/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/htc.py#L6 Version: v2.0.0 Models: - Name: htc_r50_fpn_1x_coco In Collection: HTC Config: configs/htc/htc_r50_fpn_1x_coco.py Metadata: Training Memory (GB): 8.2 inference time (ms/im): - value: 172.41 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.3 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 37.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_1x_coco/htc_r50_fpn_1x_coco_20200317-7332cf16.pth - Name: htc_r50_fpn_20e_coco In Collection: HTC Config: configs/htc/htc_r50_fpn_20e_coco.py Metadata: Training Memory (GB): 8.2 inference time (ms/im): - value: 172.41 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 20 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 43.3 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 38.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_20e_coco/htc_r50_fpn_20e_coco_20200319-fe28c577.pth - Name: htc_r101_fpn_20e_coco In Collection: HTC Config: configs/htc/htc_r101_fpn_20e_coco.py Metadata: Training Memory (GB): 10.2 inference time (ms/im): - value: 181.82 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 20 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 44.8 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 39.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r101_fpn_20e_coco/htc_r101_fpn_20e_coco_20200317-9b41b48f.pth - Name: htc_x101_32x4d_fpn_16x1_20e_coco In Collection: HTC Config: configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py Metadata: Training Resources: 16x V100 GPUs Batch Size: 16 Training Memory (GB): 11.4 inference time (ms/im): - value: 200 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 20 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.1 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 40.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_32x4d_fpn_16x1_20e_coco/htc_x101_32x4d_fpn_16x1_20e_coco_20200318-de97ae01.pth - Name: htc_x101_64x4d_fpn_16x1_20e_coco In Collection: HTC Config: configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py Metadata: Training Resources: 16x V100 GPUs Batch Size: 16 Training Memory (GB): 14.5 inference time (ms/im): - value: 227.27 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 20 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 47.0 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 41.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_16x1_20e_coco/htc_x101_64x4d_fpn_16x1_20e_coco_20200318-b181fd7a.pth - Name: htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco In Collection: HTC Config: configs/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py Metadata: Training Resources: 16x V100 GPUs Batch Size: 16 Epochs: 20 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 50.4 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 43.8 Weights: 
https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco_20200312-946fd751.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/instaboost/cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py ================================================ _base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py ================================================ _base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='InstaBoost', action_candidate=('normal', 'horizontal', 'skip'), action_prob=(1, 0, 0), scale=(0.8, 1.2), dx=15, dy=15, theta=(-1, 1), color_prob=0.5, hflag=False, aug_ratio=0.5), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] data = dict(train=dict(pipeline=train_pipeline)) # learning policy lr_config = dict(step=[32, 44]) runner = dict(type='EpochBasedRunner', max_epochs=48) ================================================ FILE: DLTA_AI_app/mmdetection/configs/instaboost/cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py ================================================ _base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py ================================================ _base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py ================================================ _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='InstaBoost', action_candidate=('normal', 'horizontal', 'skip'), action_prob=(1, 0, 0), scale=(0.8, 1.2), dx=15, dy=15, theta=(-1, 1), color_prob=0.5, hflag=False, aug_ratio=0.5), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] data = dict(train=dict(pipeline=train_pipeline)) # learning 
policy lr_config = dict(step=[32, 44]) runner = dict(type='EpochBasedRunner', max_epochs=48) ================================================ FILE: DLTA_AI_app/mmdetection/configs/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py ================================================ _base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/instaboost/metafile.yml ================================================ Collections: - Name: InstaBoost Metadata: Training Data: COCO Training Techniques: - InstaBoost - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Paper: URL: https://arxiv.org/abs/1908.07801 Title: 'Instaboost: Boosting instance segmentation via probability map guided copy-pasting' README: configs/instaboost/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/datasets/pipelines/instaboost.py#L7 Version: v2.0.0 Models: - Name: mask_rcnn_r50_fpn_instaboost_4x_coco In Collection: InstaBoost Config: configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py Metadata: Training Memory (GB): 4.4 inference time (ms/im): - value: 57.14 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 48 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.6 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 36.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth - Name: mask_rcnn_r101_fpn_instaboost_4x_coco In Collection: InstaBoost Config: configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py Metadata: Training Memory (GB): 6.4 Epochs: 48 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.5 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 38.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco/mask_rcnn_r101_fpn_instaboost_4x_coco_20200703_235738-f23f3a5f.pth - Name: mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco In Collection: InstaBoost Config: configs/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py Metadata: Training Memory (GB): 10.7 Epochs: 48 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 44.7 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 39.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco_20200515_080947-8ed58c1b.pth - Name: cascade_mask_rcnn_r50_fpn_instaboost_4x_coco In Collection: InstaBoost Config: configs/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py Metadata: Training Memory (GB): 6.0 inference time (ms/im): - value: 83.33 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 48 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 43.7 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 38.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-c19d98d9.pth 
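The metafile.yml files dumped above and below all follow one schema (a Collections list plus a Models list, where each model carries Config, Metadata, Results and Weights), so they can be queried mechanically. Below is a minimal sketch of picking the strongest checkpoint from one of them; it assumes PyYAML is installed, the working directory is the repository root shown in the FILE headers, and the checkpoint/demo-image names in the trailing comment are hypothetical:

import yaml

# Load one of the metafiles from this dump and pick the model with the
# highest COCO box AP. Top-level schema: {'Collections': [...], 'Models': [...]}.
with open('DLTA_AI_app/mmdetection/configs/instaboost/metafile.yml') as f:
    meta = yaml.safe_load(f)

def box_ap(model):
    # Each model lists one result block per task; find object detection.
    for res in model.get('Results', []):
        if res['Task'] == 'Object Detection':
            return res['Metrics']['box AP']
    return float('-inf')

best = max(meta['Models'], key=box_ap)
print(best['Name'], box_ap(best))
print(best['Config'])   # relative config path under the mmdetection tree
print(best['Weights'])  # checkpoint URL to download

# The Config path and downloaded Weights pair directly with MMDetection's
# 2.x inference API, e.g. (paths hypothetical):
#   from mmdet.apis import init_detector, inference_detector
#   detector = init_detector(best['Config'], 'checkpoint.pth', device='cuda:0')
#   result = inference_detector(detector, 'demo.jpg')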
================================================
FILE: DLTA_AI_app/mmdetection/configs/lad/lad_r101_paa_r50_fpn_coco_1x.py
================================================
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth'  # noqa
model = dict(
    type='LAD',
    # student
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='LADHead',
        reg_decoded_bbox=True,
        score_voting=True,
        topk=9,
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
    # teacher
    teacher_ckpt=teacher_ckpt,
    teacher_backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    teacher_neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    teacher_bbox_head=dict(
        type='LADHead',
        reg_decoded_bbox=True,
        score_voting=True,
        topk=9,
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.1,
            neg_iou_thr=0.1,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        score_voting=True,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
data = dict(samples_per_gpu=8, workers_per_gpu=4)
optimizer = dict(lr=0.01)
fp16 = dict(loss_scale=512.)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
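# Editor's aside (illustration only, not part of the upstream file): when
# enabled at launch, `auto_scale_lr` applies the linear scaling rule,
# lr = base_lr * (actual_batch / base_batch_size). Here 8 GPUs x 8 samples
# per GPU = 64 matches the base batch size set just below, so lr stays at
# 0.01; a smaller setup scales down proportionally, e.g.:
#
#   base_lr, base_bs = 0.01, 64
#   gpus, samples_per_gpu = 4, 8        # hypothetical 4-GPU run
#   print(base_lr * (gpus * samples_per_gpu) / base_bs)  # -> 0.005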
# base_batch_size = (8 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=64) ================================================ FILE: DLTA_AI_app/mmdetection/configs/lad/lad_r50_paa_r101_fpn_coco_1x.py ================================================ _base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa model = dict( type='LAD', # student backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_output', num_outs=5), bbox_head=dict( type='LADHead', reg_decoded_bbox=True, score_voting=True, topk=9, num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2]), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.3), loss_centerness=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), # teacher teacher_ckpt=teacher_ckpt, teacher_backbone=dict( type='ResNet', depth=101, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch'), teacher_neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_output', num_outs=5), teacher_bbox_head=dict( type='LADHead', reg_decoded_bbox=True, score_voting=True, topk=9, num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2]), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.3), loss_centerness=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), # training and testing settings train_cfg=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.1, neg_iou_thr=0.1, min_pos_iou=0, ignore_iof_thr=-1), allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, score_voting=True, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) data = dict(samples_per_gpu=8, workers_per_gpu=4) optimizer = dict(lr=0.01) fp16 = dict(loss_scale=512.) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. 
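# Editor's aside (illustration only, not part of the upstream file):
# `fp16 = dict(loss_scale=512.)` in the config above enables mixed-precision
# training with a static loss scale: the loss is multiplied by 512 before
# backward so small FP16 gradients do not underflow to zero, and gradients
# are divided by 512 again before the optimizer step. A bare-bones sketch of
# that mechanism (MMDetection's fp16 hook handles this internally):
#
#   scaled_loss = loss * 512.0          # call backward() on this
#   for p in model.parameters():
#       if p.grad is not None:
#           p.grad.div_(512.0)          # undo the scale before stepping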
# base_batch_size = (8 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=64) ================================================ FILE: DLTA_AI_app/mmdetection/configs/lad/metafile.yml ================================================ Collections: - Name: Label Assignment Distillation Metadata: Training Data: COCO Training Techniques: - Label Assignment Distillation - SGD with Momentum - Weight Decay Training Resources: 2x V100 GPUs Architecture: - FPN - ResNet Paper: URL: https://arxiv.org/abs/2108.10520 Title: 'Improving Object Detection by Label Assignment Distillation' README: configs/lad/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.19.0/mmdet/models/detectors/lad.py#L10 Version: v2.19.0 Models: - Name: lad_r101_paa_r50_fpn_coco_1x In Collection: Label Assignment Distillation Config: configs/lad/lad_r101_paa_r50_fpn_coco_1x.py Metadata: Training Memory (GB): 12.4 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 43.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/lad/lad_r101_paa_r50_fpn_coco_1x/lad_r101_paa_r50_fpn_coco_1x_20220708_124357-9407ac54.pth - Name: lad_r50_paa_r101_fpn_coco_1x In Collection: Label Assignment Distillation Config: configs/lad/lad_r50_paa_r101_fpn_coco_1x.py Metadata: Training Memory (GB): 8.9 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/lad/lad_r50_paa_r101_fpn_coco_1x/lad_r50_paa_r101_fpn_coco_1x_20220708_124246-74c76ff0.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x.py ================================================ _base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py'] teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth' # noqa model = dict( teacher_config='configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py', teacher_ckpt=teacher_ckpt, backbone=dict( type='ResNet', depth=101, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_output', num_outs=5)) lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24) # multi-scale training img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 480), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] data = dict(train=dict(pipeline=train_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py ================================================ _base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] teacher_ckpt = 
'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth' # noqa model = dict( type='KnowledgeDistillationSingleStageDetector', teacher_config='configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py', teacher_ckpt=teacher_ckpt, backbone=dict( type='ResNet', depth=18, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), neck=dict( type='FPN', in_channels=[64, 128, 256, 512], out_channels=256, start_level=1, add_extra_convs='on_output', num_outs=5), bbox_head=dict( type='LDHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128]), loss_cls=dict( type='QualityFocalLoss', use_sigmoid=True, beta=2.0, loss_weight=1.0), loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25), loss_ld=dict( type='KnowledgeDistillationKLDivLoss', loss_weight=0.25, T=10), reg_max=16, loss_bbox=dict(type='GIoULoss', loss_weight=2.0)), # training and testing settings train_cfg=dict( assigner=dict(type='ATSSAssigner', topk=9), allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) ================================================ FILE: DLTA_AI_app/mmdetection/configs/ld/ld_r34_gflv1_r101_fpn_coco_1x.py ================================================ _base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py'] model = dict( backbone=dict( type='ResNet', depth=34, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet34')), neck=dict( type='FPN', in_channels=[64, 128, 256, 512], out_channels=256, start_level=1, add_extra_convs='on_output', num_outs=5)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py ================================================ _base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py'] model = dict( backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_output', num_outs=5)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/ld/metafile.yml ================================================ Collections: - Name: Localization Distillation Metadata: Training Data: COCO Training Techniques: - Localization Distillation - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - FPN - ResNet Paper: URL: https://arxiv.org/abs/2102.12252 Title: 'Localization Distillation for Dense Object Detection' README: configs/ld/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.11.0/mmdet/models/dense_heads/ld_head.py#L11 Version: v2.11.0 Models: - Name: ld_r18_gflv1_r101_fpn_coco_1x In Collection: Localization Distillation Config: configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py Metadata: 
Training Memory (GB): 1.8 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 36.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r18_gflv1_r101_fpn_coco_1x/ld_r18_gflv1_r101_fpn_coco_1x_20220702_062206-330e6332.pth - Name: ld_r34_gflv1_r101_fpn_coco_1x In Collection: Localization Distillation Config: configs/ld/ld_r34_gflv1_r101_fpn_coco_1x.py Metadata: Training Memory (GB): 2.2 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 39.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r34_gflv1_r101_fpn_coco_1x/ld_r34_gflv1_r101_fpn_coco_1x_20220630_134007-9bc69413.pth - Name: ld_r50_gflv1_r101_fpn_coco_1x In Collection: Localization Distillation Config: configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py Metadata: Training Memory (GB): 3.6 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r50_gflv1_r101_fpn_coco_1x/ld_r50_gflv1_r101_fpn_coco_1x_20220629_145355-8dc5bad8.pth - Name: ld_r101_gflv1_r101dcn_fpn_coco_2x In Collection: Localization Distillation Config: configs/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x.py Metadata: Training Memory (GB): 5.5 Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 45.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x/ld_r101_gflv1_r101dcn_fpn_coco_2x_20220629_185920-9e658426.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/legacy_1.x/cascade_mask_rcnn_r50_fpn_1x_coco_v1.py ================================================ _base_ = [ '../_base_/models/cascade_mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='CascadeRCNN', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), rpn_head=dict( anchor_generator=dict(type='LegacyAnchorGenerator', center_offset=0.5), bbox_coder=dict( type='LegacyDeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0])), roi_head=dict( bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict( type='RoIAlign', output_size=7, sampling_ratio=2, aligned=False)), bbox_head=[ dict( type='Shared2FCBBoxHead', reg_class_agnostic=True, in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='LegacyDeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2])), dict( type='Shared2FCBBoxHead', reg_class_agnostic=True, in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='LegacyDeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.05, 0.05, 0.1, 0.1])), dict( type='Shared2FCBBoxHead', reg_class_agnostic=True, in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='LegacyDeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.033, 0.033, 0.067, 0.067])), ], mask_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict( type='RoIAlign', output_size=14, sampling_ratio=2, aligned=False)))) dist_params = dict(backend='nccl', port=29515) ================================================ FILE: 
DLTA_AI_app/mmdetection/configs/legacy_1.x/faster_rcnn_r50_fpn_1x_coco_v1.py ================================================ _base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='FasterRCNN', backbone=dict( init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), rpn_head=dict( type='RPNHead', anchor_generator=dict( type='LegacyAnchorGenerator', center_offset=0.5, scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), roi_head=dict( type='StandardRoIHead', bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict( type='RoIAlign', output_size=7, sampling_ratio=2, aligned=False), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=dict( bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), # model training and testing settings train_cfg=dict( rpn_proposal=dict(max_per_img=2000), rcnn=dict(assigner=dict(match_low_quality=True)))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/legacy_1.x/mask_rcnn_r50_fpn_1x_coco_v1.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( rpn_head=dict( anchor_generator=dict(type='LegacyAnchorGenerator', center_offset=0.5), bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), roi_head=dict( bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict( type='RoIAlign', output_size=7, sampling_ratio=2, aligned=False)), mask_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict( type='RoIAlign', output_size=14, sampling_ratio=2, aligned=False)), bbox_head=dict( bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), # model training and testing settings train_cfg=dict( rpn_proposal=dict(max_per_img=2000), rcnn=dict(assigner=dict(match_low_quality=True)))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/legacy_1.x/retinanet_r50_caffe_fpn_1x_coco_v1.py ================================================ _base_ = './retinanet_r50_fpn_1x_coco_v1.py' model = dict( backbone=dict( norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron/resnet50_caffe'))) # use caffe img_norm img_norm_cfg = dict( mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', 
                keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
================================================
FILE: DLTA_AI_app/mmdetection/configs/legacy_1.x/retinanet_r50_fpn_1x_coco_v1.py
================================================
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    bbox_head=dict(
        type='RetinaHead',
        anchor_generator=dict(
            type='LegacyAnchorGenerator',
            center_offset=0.5,
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'),
        loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)))
================================================
FILE: DLTA_AI_app/mmdetection/configs/legacy_1.x/ssd300_coco_v1.py
================================================
_base_ = [
    '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
input_size = 300
model = dict(
    bbox_head=dict(
        type='SSDHead',
        anchor_generator=dict(
            type='LegacySSDAnchorGenerator',
            scale_major=False,
            input_size=input_size,
            basesize_ratio_range=(0.15, 0.9),
            strides=[8, 16, 32, 64, 100, 300],
            ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
        bbox_coder=dict(
            type='LegacyDeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2])))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    dict(
        type='Expand',
        mean=img_norm_cfg['mean'],
        to_rgb=img_norm_cfg['to_rgb'],
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(300, 300),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=3,
    train=dict(
        _delete_=True,
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/instances_train2017.json',
            img_prefix=data_root + 'train2017/',
            pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
dist_params = dict(backend='nccl', port=29555)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
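# Editor's aside (illustration only, not part of the upstream file): the
# `RepeatDataset` wrapper above with `times=5` makes each nominal "epoch"
# iterate the COCO train split five times, so the 24-epoch 2x schedule
# traverses the data roughly 120 times while keeping per-epoch hooks cheap.
# The wrapper is essentially index arithmetic over the inner dataset:
#
#   class RepeatDataset:                      # simplified sketch
#       def __init__(self, dataset, times):
#           self.dataset, self.times = dataset, times
#       def __len__(self):
#           return self.times * len(self.dataset)
#       def __getitem__(self, idx):
#           return self.dataset[idx % len(self.dataset)]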
# base_batch_size = (8 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=64) ================================================ FILE: DLTA_AI_app/mmdetection/configs/libra_rcnn/libra_fast_rcnn_r50_fpn_1x_coco.py ================================================ _base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py' # model settings model = dict( neck=[ dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), dict( type='BFP', in_channels=256, num_levels=5, refine_level=2, refine_type='non_local') ], roi_head=dict( bbox_head=dict( loss_bbox=dict( _delete_=True, type='BalancedL1Loss', alpha=0.5, gamma=1.5, beta=1.0, loss_weight=1.0))), # model training and testing settings train_cfg=dict( rcnn=dict( sampler=dict( _delete_=True, type='CombinedSampler', num=512, pos_fraction=0.25, add_gt_as_proposals=True, pos_sampler=dict(type='InstanceBalancedPosSampler'), neg_sampler=dict( type='IoUBalancedNegSampler', floor_thr=-1, floor_fraction=0, num_bins=3))))) # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' data = dict( train=dict(proposal_file=data_root + 'libra_proposals/rpn_r50_fpn_1x_train2017.pkl'), val=dict(proposal_file=data_root + 'libra_proposals/rpn_r50_fpn_1x_val2017.pkl'), test=dict(proposal_file=data_root + 'libra_proposals/rpn_r50_fpn_1x_val2017.pkl')) ================================================ FILE: DLTA_AI_app/mmdetection/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py ================================================ _base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py ================================================ _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' # model settings model = dict( neck=[ dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), dict( type='BFP', in_channels=256, num_levels=5, refine_level=2, refine_type='non_local') ], roi_head=dict( bbox_head=dict( loss_bbox=dict( _delete_=True, type='BalancedL1Loss', alpha=0.5, gamma=1.5, beta=1.0, loss_weight=1.0))), # model training and testing settings train_cfg=dict( rpn=dict(sampler=dict(neg_pos_ub=5), allowed_border=-1), rcnn=dict( sampler=dict( _delete_=True, type='CombinedSampler', num=512, pos_fraction=0.25, add_gt_as_proposals=True, pos_sampler=dict(type='InstanceBalancedPosSampler'), neg_sampler=dict( type='IoUBalancedNegSampler', floor_thr=-1, floor_fraction=0, num_bins=3))))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py ================================================ _base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/libra_rcnn/libra_retinanet_r50_fpn_1x_coco.py ================================================ _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' # model settings model = dict( neck=[ dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, 
add_extra_convs='on_input', num_outs=5), dict( type='BFP', in_channels=256, num_levels=5, refine_level=1, refine_type='non_local') ], bbox_head=dict( loss_bbox=dict( _delete_=True, type='BalancedL1Loss', alpha=0.5, gamma=1.5, beta=0.11, loss_weight=1.0))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/libra_rcnn/metafile.yml ================================================ Collections: - Name: Libra R-CNN Metadata: Training Data: COCO Training Techniques: - IoU-Balanced Sampling - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - Balanced Feature Pyramid Paper: URL: https://arxiv.org/abs/1904.02701 Title: 'Libra R-CNN: Towards Balanced Learning for Object Detection' README: configs/libra_rcnn/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/necks/bfp.py#L10 Version: v2.0.0 Models: - Name: libra_faster_rcnn_r50_fpn_1x_coco In Collection: Libra R-CNN Config: configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py Metadata: Training Memory (GB): 4.6 inference time (ms/im): - value: 52.63 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 38.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth - Name: libra_faster_rcnn_r101_fpn_1x_coco In Collection: Libra R-CNN Config: configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py Metadata: Training Memory (GB): 6.5 inference time (ms/im): - value: 69.44 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco/libra_faster_rcnn_r101_fpn_1x_coco_20200203-8dba6a5a.pth - Name: libra_faster_rcnn_x101_64x4d_fpn_1x_coco In Collection: Libra R-CNN Config: configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py Metadata: Training Memory (GB): 10.8 inference time (ms/im): - value: 117.65 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco/libra_faster_rcnn_x101_64x4d_fpn_1x_coco_20200315-3a7d0488.pth - Name: libra_retinanet_r50_fpn_1x_coco In Collection: Libra R-CNN Config: configs/libra_rcnn/libra_retinanet_r50_fpn_1x_coco.py Metadata: Training Memory (GB): 4.2 inference time (ms/im): - value: 56.5 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 37.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_retinanet_r50_fpn_1x_coco/libra_retinanet_r50_fpn_1x_coco_20200205-804d94ce.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py ================================================ _base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: 
DLTA_AI_app/mmdetection/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py ================================================ _base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/lvis_v1_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( roi_head=dict( bbox_head=dict(num_classes=1203), mask_head=dict(num_classes=1203)), test_cfg=dict( rcnn=dict( score_thr=0.0001, # LVIS allows up to 300 max_per_img=300))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] data = dict(train=dict(dataset=dict(pipeline=train_pipeline))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/lvis_v0.5_instance.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] model = dict( roi_head=dict( bbox_head=dict(num_classes=1230), mask_head=dict(num_classes=1230)), test_cfg=dict( rcnn=dict( score_thr=0.0001, # LVIS allows up to 300 max_per_img=300))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] data = dict(train=dict(dataset=dict(pipeline=train_pipeline))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py ================================================ _base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py ================================================ _base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py' model = dict( backbone=dict( 
type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py ================================================ _base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py ================================================ _base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic.py ================================================ _base_ = './mask2former_r50_lsj_8x2_50e_coco-panoptic.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco.py ================================================ _base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py'] model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py ================================================ _base_ = [ '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py' ] num_things_classes = 80 num_stuff_classes = 53 num_classes = num_things_classes + num_stuff_classes model = dict( type='Mask2Former', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=-1, norm_cfg=dict(type='BN', requires_grad=False), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), panoptic_head=dict( type='Mask2FormerHead', in_channels=[256, 512, 1024, 2048], # pass to pixel_decoder inside strides=[4, 8, 16, 32], feat_channels=256, out_channels=256, num_things_classes=num_things_classes, num_stuff_classes=num_stuff_classes, num_queries=100, num_transformer_feat_level=3, pixel_decoder=dict( type='MSDeformAttnPixelDecoder', num_outs=3, norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='ReLU'), encoder=dict( type='DetrTransformerEncoder', num_layers=6, transformerlayers=dict( type='BaseTransformerLayer', attn_cfgs=dict( type='MultiScaleDeformableAttention', embed_dims=256, num_heads=8, num_levels=3, num_points=4, im2col_step=64, dropout=0.0, batch_first=False, norm_cfg=None, init_cfg=None), ffn_cfgs=dict( type='FFN', embed_dims=256, feedforward_channels=1024, num_fcs=2, ffn_drop=0.0, 
act_cfg=dict(type='ReLU', inplace=True)), operation_order=('self_attn', 'norm', 'ffn', 'norm')), init_cfg=None), positional_encoding=dict( type='SinePositionalEncoding', num_feats=128, normalize=True), init_cfg=None), enforce_decoder_input_project=False, positional_encoding=dict( type='SinePositionalEncoding', num_feats=128, normalize=True), transformer_decoder=dict( type='DetrTransformerDecoder', return_intermediate=True, num_layers=9, transformerlayers=dict( type='DetrTransformerDecoderLayer', attn_cfgs=dict( type='MultiheadAttention', embed_dims=256, num_heads=8, attn_drop=0.0, proj_drop=0.0, dropout_layer=None, batch_first=False), ffn_cfgs=dict( embed_dims=256, feedforward_channels=2048, num_fcs=2, act_cfg=dict(type='ReLU', inplace=True), ffn_drop=0.0, dropout_layer=None, add_identity=True), feedforward_channels=2048, operation_order=('cross_attn', 'norm', 'self_attn', 'norm', 'ffn', 'norm')), init_cfg=None), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0, reduction='mean', class_weight=[1.0] * num_classes + [0.1]), loss_mask=dict( type='CrossEntropyLoss', use_sigmoid=True, reduction='mean', loss_weight=5.0), loss_dice=dict( type='DiceLoss', use_sigmoid=True, activate=True, reduction='mean', naive_dice=True, eps=1.0, loss_weight=5.0)), panoptic_fusion_head=dict( type='MaskFormerFusionHead', num_things_classes=num_things_classes, num_stuff_classes=num_stuff_classes, loss_panoptic=None, init_cfg=None), train_cfg=dict( num_points=12544, oversample_ratio=3.0, importance_sample_ratio=0.75, assigner=dict( type='MaskHungarianAssigner', cls_cost=dict(type='ClassificationCost', weight=2.0), mask_cost=dict( type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), dice_cost=dict( type='DiceCost', weight=5.0, pred_act=True, eps=1.0)), sampler=dict(type='MaskPseudoSampler')), test_cfg=dict( panoptic_on=True, # For now, the dataset does not support # evaluating semantic segmentation metric. semantic_on=False, instance_on=True, # max_per_image is for instance segmentation. max_per_image=100, iou_thr=0.8, # In Mask2Former's panoptic postprocessing, # it will filter mask area where score is less than 0.5 . 
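# (an illustrative reading, not upstream text: roughly, a predicted segment is kept only if the region it wins in the per-pixel argmax covers at least iou_thr (0.8) of its own >= 0.5 mask, and filter_low_score=True below additionally intersects the kept region with that >= 0.5 mask.)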
filter_low_score=True), init_cfg=None) # dataset settings image_size = (1024, 1024) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict( type='LoadPanopticAnnotations', with_bbox=True, with_mask=True, with_seg=True), dict(type='RandomFlip', flip_ratio=0.5), # large scale jittering dict( type='Resize', img_scale=image_size, ratio_range=(0.1, 2.0), multiscale_mode='range', keep_ratio=True), dict( type='RandomCrop', crop_size=image_size, crop_type='absolute', recompute_bbox=True, allow_negative_crop=True), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size=image_size), dict(type='DefaultFormatBundle', img_to_float=True), dict( type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data_root = 'data/coco/' data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict(pipeline=train_pipeline), val=dict( pipeline=test_pipeline, ins_ann_file=data_root + 'annotations/instances_val2017.json', ), test=dict( pipeline=test_pipeline, ins_ann_file=data_root + 'annotations/instances_val2017.json', )) embed_multi = dict(lr_mult=1.0, decay_mult=0.0) # optimizer optimizer = dict( type='AdamW', lr=0.0001, weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999), paramwise_cfg=dict( custom_keys={ 'backbone': dict(lr_mult=0.1, decay_mult=1.0), 'query_embed': embed_multi, 'query_feat': embed_multi, 'level_embed': embed_multi, }, norm_decay_mult=0.0)) optimizer_config = dict(grad_clip=dict(max_norm=0.01, norm_type=2)) # learning policy lr_config = dict( policy='step', gamma=0.1, by_epoch=False, step=[327778, 355092], warmup='linear', warmup_by_epoch=False, warmup_ratio=1.0, # no warmup warmup_iters=10) max_iters = 368750 runner = dict(type='IterBasedRunner', max_iters=max_iters) log_config = dict( interval=50, hooks=[ dict(type='TextLoggerHook', by_epoch=False), dict(type='TensorboardLoggerHook', by_epoch=False) ]) interval = 5000 workflow = [('train', interval)] checkpoint_config = dict( by_epoch=False, interval=interval, save_last=True, max_keep_ckpts=3) # Before 365001th iteration, we do evaluation every 5000 iterations. # After 365000th iteration, we do evaluation every 368750 iterations, # which means that we do evaluation at the end of training. 
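# (illustrative arithmetic, not in the upstream file, using the values defined above: max_iters // interval * interval + 1 = 368750 // 5000 * 5000 + 1 = 73 * 5000 + 1 = 365001, so the dynamic_intervals expression below schedules evaluation every 5000 iterations up to iteration 365000 and a single final evaluation at iteration 368750.)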
dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)] evaluation = dict( interval=interval, dynamic_intervals=dynamic_intervals, metric=['PQ', 'bbox', 'segm']) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py ================================================ _base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py'] num_things_classes = 80 num_stuff_classes = 0 num_classes = num_things_classes + num_stuff_classes model = dict( panoptic_head=dict( num_things_classes=num_things_classes, num_stuff_classes=num_stuff_classes, loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])), panoptic_fusion_head=dict( num_things_classes=num_things_classes, num_stuff_classes=num_stuff_classes), test_cfg=dict(panoptic_on=False)) # dataset settings image_size = (1024, 1024) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) pad_cfg = dict(img=(128, 128, 128), masks=0, seg=255) train_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='RandomFlip', flip_ratio=0.5), # large scale jittering dict( type='Resize', img_scale=image_size, ratio_range=(0.1, 2.0), multiscale_mode='range', keep_ratio=True), dict( type='RandomCrop', crop_size=image_size, crop_type='absolute', recompute_bbox=True, allow_negative_crop=True), dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True), dict(type='Pad', size=image_size, pad_val=pad_cfg), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle', img_to_float=True), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Pad', size_divisor=32, pad_val=pad_cfg), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] dataset_type = 'CocoDataset' data_root = 'data/coco/' data = dict( _delete_=True, samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) evaluation = dict(metric=['bbox', 'segm']) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py ================================================ _base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py'] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth' # noqa model = dict( backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=pretrained))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py ================================================ _base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py'] pretrained = 
'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth' # noqa depths = [2, 2, 18, 2] model = dict( backbone=dict( pretrain_img_size=384, embed_dims=128, depths=depths, num_heads=[4, 8, 16, 32], window_size=12, init_cfg=dict(type='Pretrained', checkpoint=pretrained)), panoptic_head=dict(in_channels=[128, 256, 512, 1024])) # set all layers in backbone to lr_mult=0.1 # set all norm layers, position_embeding, # query_embeding, level_embeding to decay_multi=0.0 backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) embed_multi = dict(lr_mult=1.0, decay_mult=0.0) custom_keys = { 'backbone': dict(lr_mult=0.1, decay_mult=1.0), 'backbone.patch_embed.norm': backbone_norm_multi, 'backbone.norm': backbone_norm_multi, 'absolute_pos_embed': backbone_embed_multi, 'relative_position_bias_table': backbone_embed_multi, 'query_embed': embed_multi, 'query_feat': embed_multi, 'level_embed': embed_multi } custom_keys.update({ f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi for stage_id, num_blocks in enumerate(depths) for block_id in range(num_blocks) }) custom_keys.update({ f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi for stage_id in range(len(depths) - 1) }) # optimizer optimizer = dict( paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py ================================================ _base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py'] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa model = dict( backbone=dict( embed_dims=192, num_heads=[6, 12, 24, 48], init_cfg=dict(type='Pretrained', checkpoint=pretrained)), panoptic_head=dict(num_queries=200, in_channels=[192, 384, 768, 1536])) data = dict(samples_per_gpu=1, workers_per_gpu=1) lr_config = dict(step=[655556, 710184]) max_iters = 737500 runner = dict(type='IterBasedRunner', max_iters=max_iters) # Before 735001th iteration, we do evaluation every 5000 iterations. # After 735000th iteration, we do evaluation every 737500 iterations, # which means that we do evaluation at the end of training.
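# (the same construction as in the r50 config above: 737500 // 5000 * 5000 + 1 = 147 * 5000 + 1 = 735001, so the regular 5000-iteration evaluation stops at iteration 735000 and one final evaluation runs at iteration 737500, the end of training.)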
interval = 5000 dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)] evaluation = dict( interval=interval, dynamic_intervals=dynamic_intervals, metric=['PQ', 'bbox', 'segm']) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py ================================================ _base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py'] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa depths = [2, 2, 18, 2] model = dict( backbone=dict( depths=depths, init_cfg=dict(type='Pretrained', checkpoint=pretrained))) # set all layers in backbone to lr_mult=0.1 # set all norm layers, position_embeding, # query_embeding, level_embeding to decay_multi=0.0 backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) embed_multi = dict(lr_mult=1.0, decay_mult=0.0) custom_keys = { 'backbone': dict(lr_mult=0.1, decay_mult=1.0), 'backbone.patch_embed.norm': backbone_norm_multi, 'backbone.norm': backbone_norm_multi, 'absolute_pos_embed': backbone_embed_multi, 'relative_position_bias_table': backbone_embed_multi, 'query_embed': embed_multi, 'query_feat': embed_multi, 'level_embed': embed_multi } custom_keys.update({ f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi for stage_id, num_blocks in enumerate(depths) for block_id in range(num_blocks) }) custom_keys.update({ f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi for stage_id in range(len(depths) - 1) }) # optimizer optimizer = dict( paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py ================================================ _base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py'] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa depths = [2, 2, 18, 2] model = dict( backbone=dict( depths=depths, init_cfg=dict(type='Pretrained', checkpoint=pretrained))) # set all layers in backbone to lr_mult=0.1 # set all norm layers, position_embeding, # query_embeding, level_embeding to decay_multi=0.0 backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) embed_multi = dict(lr_mult=1.0, decay_mult=0.0) custom_keys = { 'backbone': dict(lr_mult=0.1, decay_mult=1.0), 'backbone.patch_embed.norm': backbone_norm_multi, 'backbone.norm': backbone_norm_multi, 'absolute_pos_embed': backbone_embed_multi, 'relative_position_bias_table': backbone_embed_multi, 'query_embed': embed_multi, 'query_feat': embed_multi, 'level_embed': embed_multi } custom_keys.update({ f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi for stage_id, num_blocks in enumerate(depths) for block_id in range(num_blocks) }) custom_keys.update({ f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi for stage_id in range(len(depths) - 1) }) # optimizer optimizer = dict( paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py ================================================ _base_ = 
['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py'] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa depths = [2, 2, 6, 2] model = dict( type='Mask2Former', backbone=dict( _delete_=True, type='SwinTransformer', embed_dims=96, depths=depths, num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.3, patch_norm=True, out_indices=(0, 1, 2, 3), with_cp=False, convert_weights=True, frozen_stages=-1, init_cfg=dict(type='Pretrained', checkpoint=pretrained)), panoptic_head=dict( type='Mask2FormerHead', in_channels=[96, 192, 384, 768]), init_cfg=None) # set all layers in backbone to lr_mult=0.1 # set all norm layers, position_embeding, # query_embeding, level_embeding to decay_multi=0.0 backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) embed_multi = dict(lr_mult=1.0, decay_mult=0.0) custom_keys = { 'backbone': dict(lr_mult=0.1, decay_mult=1.0), 'backbone.patch_embed.norm': backbone_norm_multi, 'backbone.norm': backbone_norm_multi, 'absolute_pos_embed': backbone_embed_multi, 'relative_position_bias_table': backbone_embed_multi, 'query_embed': embed_multi, 'query_feat': embed_multi, 'level_embed': embed_multi } custom_keys.update({ f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi for stage_id, num_blocks in enumerate(depths) for block_id in range(num_blocks) }) custom_keys.update({ f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi for stage_id in range(len(depths) - 1) }) # optimizer optimizer = dict( type='AdamW', lr=0.0001, weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999), paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py ================================================ _base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py'] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa depths = [2, 2, 6, 2] model = dict( type='Mask2Former', backbone=dict( _delete_=True, type='SwinTransformer', embed_dims=96, depths=depths, num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.3, patch_norm=True, out_indices=(0, 1, 2, 3), with_cp=False, convert_weights=True, frozen_stages=-1, init_cfg=dict(type='Pretrained', checkpoint=pretrained)), panoptic_head=dict( type='Mask2FormerHead', in_channels=[96, 192, 384, 768]), init_cfg=None) # set all layers in backbone to lr_mult=0.1 # set all norm layers, position_embeding, # query_embeding, level_embeding to decay_multi=0.0 backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) embed_multi = dict(lr_mult=1.0, decay_mult=0.0) custom_keys = { 'backbone': dict(lr_mult=0.1, decay_mult=1.0), 'backbone.patch_embed.norm': backbone_norm_multi, 'backbone.norm': backbone_norm_multi, 'absolute_pos_embed': backbone_embed_multi, 'relative_position_bias_table': backbone_embed_multi, 'query_embed': embed_multi, 'query_feat': embed_multi, 'level_embed': embed_multi } custom_keys.update({ f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi for stage_id, num_blocks in enumerate(depths) for block_id in range(num_blocks) }) custom_keys.update({ 
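# (illustrative expansion, not in the upstream file: with depths = [2, 2, 6, 2] the first update() above adds the 12 block norms 'backbone.stages.0.blocks.0.norm' ... 'backbone.stages.3.blocks.1.norm', and this second one adds the three patch-merging norms 'backbone.stages.0.downsample.norm' ... 'backbone.stages.2.downsample.norm', all mapped to lr_mult=0.1, decay_mult=0.0.)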
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi for stage_id in range(len(depths) - 1) }) # optimizer optimizer = dict( type='AdamW', lr=0.0001, weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999), paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask2former/metafile.yml ================================================ Collections: - Name: Mask2Former Metadata: Training Data: COCO Training Techniques: - AdamW - Weight Decay Training Resources: 8x A100 GPUs Architecture: - Mask2Former Paper: URL: https://arxiv.org/pdf/2112.01527 Title: 'Masked-attention Mask Transformer for Universal Image Segmentation' README: configs/mask2former/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.23.0/mmdet/models/detectors/mask2former.py#L7 Version: v2.23.0 Models: - Name: mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic In Collection: Mask2Former Config: configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py Metadata: Training Memory (GB): 19.1 Iterations: 368750 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 47.8 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 44.5 - Task: Panoptic Segmentation Dataset: COCO Metrics: PQ: 54.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220329_225200-c7b94355.pth - Name: mask2former_r101_lsj_8x2_50e_coco In Collection: Mask2Former Config: configs/mask2former/mask2former_r101_lsj_8x2_50e_coco.py Metadata: Training Memory (GB): 15.5 Iterations: 368750 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.7 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 44.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco/mask2former_r101_lsj_8x2_50e_coco_20220426_100250-c50b6fa6.pth - Name: mask2former_r101_lsj_8x2_50e_coco-panoptic In Collection: Mask2Former Config: configs/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic.py Metadata: Training Memory (GB): 16.1 Iterations: 368750 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 45.3 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 42.4 - Task: Panoptic Segmentation Dataset: COCO Metrics: PQ: 52.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic/mask2former_r101_lsj_8x2_50e_coco-panoptic_20220329_225104-c54e64c9.pth - Name: mask2former_r50_lsj_8x2_50e_coco-panoptic In Collection: Mask2Former Config: configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py Metadata: Training Memory (GB): 13.9 Iterations: 368750 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 44.8 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 41.9 - Task: Panoptic Segmentation Dataset: COCO Metrics: PQ: 51.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth - Name: mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic In Collection: Mask2Former Config: configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py Metadata: Training Memory (GB): 15.9 Iterations: 368750 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.3 - Task: Instance Segmentation Dataset: COCO 
Metrics: mask AP: 43.4 - Task: Panoptic Segmentation Dataset: COCO Metrics: PQ: 53.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220326_224553-fc567107.pth - Name: mask2former_r50_lsj_8x2_50e_coco In Collection: Mask2Former Config: configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py Metadata: Training Memory (GB): 13.7 Iterations: 368750 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 45.7 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 42.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco/mask2former_r50_lsj_8x2_50e_coco_20220506_191028-8e96e88b.pth - Name: mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic In Collection: Mask2Former Config: configs/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py Metadata: Training Memory (GB): 21.1 Iterations: 737500 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 52.2 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 48.5 - Task: Panoptic Segmentation Dataset: COCO Metrics: PQ: 57.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic_20220407_104949-d4919c44.pth - Name: mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic In Collection: Mask2Former Config: configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py Metadata: Training Memory (GB): 25.8 Iterations: 368750 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 50.0 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 46.3 - Task: Panoptic Segmentation Dataset: COCO Metrics: PQ: 56.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic_20220329_230021-3bb8b482.pth - Name: mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic In Collection: Mask2Former Config: configs/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py Metadata: Training Memory (GB): 26.0 Iterations: 368750 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 48.2 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 44.9 - Task: Panoptic Segmentation Dataset: COCO Metrics: PQ: 55.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic_20220331_002244-c149a9e9.pth - Name: mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco In Collection: Mask2Former Config: configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py Metadata: Training Memory (GB): 15.3 Iterations: 368750 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 47.7 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 44.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco_20220508_091649-4a943037.pth - Name: mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco In Collection: Mask2Former Config: configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py Metadata: Training Memory (GB): 18.8 Iterations: 368750 Results: - Task: Object 
Detection Dataset: COCO Metrics: box AP: 49.3 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 46.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco_20220504_001756-743b7d99.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py ================================================ _base_ = './mask_rcnn_r50_caffe_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py ================================================ _base_ = [ '../common/mstrain-poly_3x_coco_instance.py', '../_base_/models/mask_rcnn_r50_fpn.py' ] model = dict( backbone=dict( depth=101, norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe'))) # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='LoadAnnotations', with_bbox=True, with_mask=True, poly2mask=False), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(dataset=dict(pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py ================================================ _base_ = './mask_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py ================================================ _base_ = './mask_rcnn_r50_fpn_2x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py ================================================ _base_ = [ '../common/mstrain-poly_3x_coco_instance.py', '../_base_/models/mask_rcnn_r50_fpn.py' ] model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_c4_1x_coco.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_caffe_c4.py', '../_base_/datasets/coco_instance.py', 
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py ================================================ _base_ = './mask_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(requires_grad=False), style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe'))) # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py ================================================ _base_ = './mask_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(requires_grad=False), style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe'))) # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='LoadAnnotations', with_bbox=True, with_mask=True, poly2mask=False), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ 
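# (illustrative note, not in the upstream file: despite its name, MultiScaleFlipAug with a single img_scale and flip=False performs plain single-scale testing; it only becomes test-time augmentation when several scales or flip=True are configured.)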
dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py ================================================ _base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' # learning policy lr_config = dict(step=[16, 23]) runner = dict(type='EpochBasedRunner', max_epochs=24) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py ================================================ _base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' # learning policy lr_config = dict(step=[28, 34]) runner = dict(type='EpochBasedRunner', max_epochs=36) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py ================================================ _base_ = './mask_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(requires_grad=False), style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe'))) # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py ================================================ _base_ = './mask_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(requires_grad=False), style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe')), rpn_head=dict( loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), roi_head=dict( bbox_roi_extractor=dict( roi_layer=dict( type='RoIAlign', output_size=7, sampling_ratio=2, aligned=False)), bbox_head=dict( loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), mask_roi_extractor=dict( roi_layer=dict( type='RoIAlign', output_size=14, sampling_ratio=2, aligned=False)))) # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) 
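# (illustrative note, not in the upstream file: caffe-style ResNet weights expect BGR input, hence to_rgb=False and means listed in BGR order; with std=[1.0, 1.0, 1.0] the Normalize step reduces to plain mean subtraction, img = img_bgr - [103.530, 116.280, 123.675], whereas torchvision-style configs such as mask_rcnn_r50_fpn_poly_1x_coco.py below normalize RGB images with per-channel stds.)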
train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='LoadAnnotations', with_bbox=True, with_mask=True, poly2mask=False), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_wandb_coco.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # Set evaluation interval evaluation = dict(interval=2) # Set checkpoint interval checkpoint_config = dict(interval=4) # yapf:disable log_config = dict( interval=50, hooks=[ dict(type='TextLoggerHook'), dict(type='MMDetWandbHook', init_kwargs={ 'project': 'mmdetection', 'group': 'maskrcnn-r50-fpn-1x-coco' }, interval=50, log_checkpoint=True, log_checkpoint_metadata=True, num_eval_images=100) ]) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py ================================================ _base_ = './mask_rcnn_r50_fpn_1x_coco.py' # fp16 settings fp16 = dict(loss_scale=512.) 
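# (illustrative sketch, not in the upstream file: a static loss scale of 512 means the mixed-precision hook effectively computes (loss * 512.).backward() so that small FP16 gradients do not underflow, then divides the gradients by 512 before the optimizer step; mmcv's Fp16OptimizerHook also accepts loss_scale='dynamic' for adaptive scaling.)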
================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py ================================================ _base_ = [ '../common/mstrain-poly_3x_coco_instance.py', '../_base_/models/mask_rcnn_r50_fpn.py' ] ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_poly_1x_coco.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='LoadAnnotations', with_bbox=True, with_mask=True, poly2mask=False), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] data = dict(train=dict(pipeline=train_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py ================================================ _base_ = './mask_rcnn_r101_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py ================================================ _base_ = './mask_rcnn_r101_fpn_2x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py ================================================ _base_ = [ '../common/mstrain-poly_3x_coco_instance.py', '../_base_/models/mask_rcnn_r50_fpn.py' ] model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco.py ================================================ _base_ = './mask_rcnn_r101_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=8, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, 
with_mask=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py ================================================ _base_ = './mask_rcnn_r101_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=8, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='LoadAnnotations', with_bbox=True, with_mask=True, poly2mask=False), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py ================================================ _base_ = [ '../common/mstrain-poly_3x_coco_instance.py', '../_base_/models/mask_rcnn_r50_fpn.py' ] model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=8, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], 
to_rgb=False) # In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], # multiscale_mode='range' train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='LoadAnnotations', with_bbox=True, with_mask=True, poly2mask=False), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] # Use RepeatDataset to speed up training data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type='RepeatDataset', times=3, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline)), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py ================================================ _base_ = './mask_rcnn_x101_32x4d_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py ================================================ _base_ = './mask_rcnn_x101_32x4d_fpn_2x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py ================================================ _base_ = [ '../common/mstrain-poly_3x_coco_instance.py', '../_base_/models/mask_rcnn_r50_fpn.py' ] model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/mask_rcnn/metafile.yml ================================================ Collections: - Name: Mask R-CNN Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - Softmax - RPN - Convolution - Dense Connections - FPN - ResNet - RoIAlign Paper: URL: https://arxiv.org/abs/1703.06870v3 Title: 
"Mask R-CNN" README: configs/mask_rcnn/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/mask_rcnn.py#L6 Version: v2.0.0 Models: - Name: mask_rcnn_r50_caffe_fpn_1x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py Metadata: Training Memory (GB): 4.3 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 38.0 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 34.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco/mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.38__segm_mAP-0.344_20200504_231812-0ebd1859.pth - Name: mask_rcnn_r50_fpn_1x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py Metadata: Training Memory (GB): 4.4 inference time (ms/im): - value: 62.11 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 38.2 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 34.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth - Name: mask_rcnn_r50_fpn_fp16_1x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py Metadata: Training Memory (GB): 3.6 Training Techniques: - SGD with Momentum - Weight Decay - Mixed Precision Training inference time (ms/im): - value: 41.49 hardware: V100 backend: PyTorch batch size: 1 mode: FP16 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 38.1 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 34.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth - Name: mask_rcnn_r50_fpn_2x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py Metadata: Training Memory (GB): 4.4 inference time (ms/im): - value: 62.11 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 39.2 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 35.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_bbox_mAP-0.392__segm_mAP-0.354_20200505_003907-3e542a40.pth - Name: mask_rcnn_r101_caffe_fpn_1x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.4 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 36.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco/mask_rcnn_r101_caffe_fpn_1x_coco_20200601_095758-805e06c1.pth - Name: mask_rcnn_r101_fpn_1x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py Metadata: Training Memory (GB): 6.4 inference time (ms/im): - value: 74.07 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.0 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 36.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth - Name: mask_rcnn_r101_fpn_2x_coco In 
Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py Metadata: Training Memory (GB): 6.4 inference time (ms/im): - value: 74.07 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.8 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 36.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_2x_coco/mask_rcnn_r101_fpn_2x_coco_bbox_mAP-0.408__segm_mAP-0.366_20200505_071027-14b391c7.pth - Name: mask_rcnn_x101_32x4d_fpn_1x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py Metadata: Training Memory (GB): 7.6 inference time (ms/im): - value: 88.5 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.9 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 37.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205-478d0b67.pth - Name: mask_rcnn_x101_32x4d_fpn_2x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py Metadata: Training Memory (GB): 7.6 inference time (ms/im): - value: 88.5 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.2 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 37.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco/mask_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.422__segm_mAP-0.378_20200506_004702-faef898c.pth - Name: mask_rcnn_x101_64x4d_fpn_1x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py Metadata: Training Memory (GB): 10.7 inference time (ms/im): - value: 125 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.8 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 38.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201-9352eb0d.pth - Name: mask_rcnn_x101_64x4d_fpn_2x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py Metadata: Training Memory (GB): 10.7 inference time (ms/im): - value: 125 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.7 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 38.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco/mask_rcnn_x101_64x4d_fpn_2x_coco_20200509_224208-39d6f70c.pth - Name: mask_rcnn_x101_32x8d_fpn_1x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco.py Metadata: Training Memory (GB): 10.6 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.8 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 38.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco/mask_rcnn_x101_32x8d_fpn_1x_coco_20220630_173841-0aaf329e.pth - Name: mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco In Collection: Mask R-CNN Config: 
configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py Metadata: Training Memory (GB): 4.3 Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.3 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 36.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco_bbox_mAP-0.403__segm_mAP-0.365_20200504_231822-a75c98ce.pth - Name: mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py Metadata: Training Memory (GB): 4.3 Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.8 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 37.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth - Name: mask_rcnn_r50_fpn_mstrain-poly_3x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py Metadata: Training Memory (GB): 4.1 Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.9 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 37.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_fpn_mstrain-poly_3x_coco_20210524_201154-21b550bb.pth - Name: mask_rcnn_r101_fpn_mstrain-poly_3x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py Metadata: Training Memory (GB): 6.1 Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.7 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 38.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_fpn_mstrain-poly_3x_coco_20210524_200244-5675c317.pth - Name: mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py Metadata: Training Memory (GB): 5.9 Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.9 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 38.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco_20210526_132339-3c33ce02.pth - Name: mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py Metadata: Training Memory (GB): 7.3 Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 43.6 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 39.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco_20210524_201410-abcd7859.pth - Name: mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py Metadata: Training Memory (GB): 10.4 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 43.4 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 39.0 Weights: 
https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco_20220630_170346-b4637974.pth - Name: mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py Metadata: Training Memory (GB): 10.3 Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 44.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco_20210607_161042-8bd2c639.pth - Name: mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py Metadata: Epochs: 36 Training Memory (GB): 10.4 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 44.5 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 39.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco_20210526_120447-c376f129.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py ================================================ _base_ = [ '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py' ] num_things_classes = 80 num_stuff_classes = 53 num_classes = num_things_classes + num_stuff_classes model = dict( type='MaskFormer', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=-1, norm_cfg=dict(type='BN', requires_grad=False), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), panoptic_head=dict( type='MaskFormerHead', in_channels=[256, 512, 1024, 2048], # pass to pixel_decoder inside feat_channels=256, out_channels=256, num_things_classes=num_things_classes, num_stuff_classes=num_stuff_classes, num_queries=100, pixel_decoder=dict( type='TransformerEncoderPixelDecoder', norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='ReLU'), encoder=dict( type='DetrTransformerEncoder', num_layers=6, transformerlayers=dict( type='BaseTransformerLayer', attn_cfgs=dict( type='MultiheadAttention', embed_dims=256, num_heads=8, attn_drop=0.1, proj_drop=0.1, dropout_layer=None, batch_first=False), ffn_cfgs=dict( embed_dims=256, feedforward_channels=2048, num_fcs=2, act_cfg=dict(type='ReLU', inplace=True), ffn_drop=0.1, dropout_layer=None, add_identity=True), operation_order=('self_attn', 'norm', 'ffn', 'norm'), norm_cfg=dict(type='LN'), init_cfg=None, batch_first=False), init_cfg=None), positional_encoding=dict( type='SinePositionalEncoding', num_feats=128, normalize=True)), enforce_decoder_input_project=False, positional_encoding=dict( type='SinePositionalEncoding', num_feats=128, normalize=True), transformer_decoder=dict( type='DetrTransformerDecoder', return_intermediate=True, num_layers=6, transformerlayers=dict( type='DetrTransformerDecoderLayer', attn_cfgs=dict( type='MultiheadAttention', embed_dims=256, num_heads=8, attn_drop=0.1, proj_drop=0.1, dropout_layer=None, batch_first=False), ffn_cfgs=dict( embed_dims=256, feedforward_channels=2048, num_fcs=2, act_cfg=dict(type='ReLU', inplace=True), ffn_drop=0.1, dropout_layer=None, add_identity=True), # the following parameter was not used, # just make current api happy feedforward_channels=2048, operation_order=('self_attn', 'norm', 
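These metafile entries are machine-readable. A minimal sketch (assuming PyYAML; the relative path is hypothetical and depends on the working directory) of resolving a model name to its checkpoint URL:

# Sketch: look up a pretrained checkpoint URL in a metafile.yml (assumes PyYAML).
import yaml

with open('configs/mask_rcnn/metafile.yml') as f:
    meta = yaml.safe_load(f)

entry = next(m for m in meta['Models'] if m['Name'] == 'mask_rcnn_r50_fpn_1x_coco')
print(entry['Weights'])  # download URL of the pretrained weights
print(entry['Results'])  # box/mask AP on COCO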
================================================
FILE: DLTA_AI_app/mmdetection/configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py
================================================
_base_ = [
    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'
]
num_things_classes = 80
num_stuff_classes = 53
num_classes = num_things_classes + num_stuff_classes
model = dict(
    type='MaskFormer',
    backbone=dict(
        type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3),
        frozen_stages=-1, norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True, style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    panoptic_head=dict(
        type='MaskFormerHead',
        in_channels=[256, 512, 1024, 2048],  # pass to pixel_decoder inside
        feat_channels=256,
        out_channels=256,
        num_things_classes=num_things_classes,
        num_stuff_classes=num_stuff_classes,
        num_queries=100,
        pixel_decoder=dict(
            type='TransformerEncoderPixelDecoder',
            norm_cfg=dict(type='GN', num_groups=32),
            act_cfg=dict(type='ReLU'),
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=dict(
                        type='MultiheadAttention', embed_dims=256, num_heads=8,
                        attn_drop=0.1, proj_drop=0.1, dropout_layer=None,
                        batch_first=False),
                    ffn_cfgs=dict(
                        embed_dims=256, feedforward_channels=2048, num_fcs=2,
                        act_cfg=dict(type='ReLU', inplace=True), ffn_drop=0.1,
                        dropout_layer=None, add_identity=True),
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'),
                    norm_cfg=dict(type='LN'),
                    init_cfg=None,
                    batch_first=False),
                init_cfg=None),
            positional_encoding=dict(
                type='SinePositionalEncoding', num_feats=128, normalize=True)),
        enforce_decoder_input_project=False,
        positional_encoding=dict(
            type='SinePositionalEncoding', num_feats=128, normalize=True),
        transformer_decoder=dict(
            type='DetrTransformerDecoder',
            return_intermediate=True,
            num_layers=6,
            transformerlayers=dict(
                type='DetrTransformerDecoderLayer',
                attn_cfgs=dict(
                    type='MultiheadAttention', embed_dims=256, num_heads=8,
                    attn_drop=0.1, proj_drop=0.1, dropout_layer=None,
                    batch_first=False),
                ffn_cfgs=dict(
                    embed_dims=256, feedforward_channels=2048, num_fcs=2,
                    act_cfg=dict(type='ReLU', inplace=True), ffn_drop=0.1,
                    dropout_layer=None, add_identity=True),
                # the following parameter was not used,
                # just make current api happy
                feedforward_channels=2048,
                operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
                                 'ffn', 'norm')),
            init_cfg=None),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0,
            reduction='mean', class_weight=[1.0] * num_classes + [0.1]),
        loss_mask=dict(
            type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25,
            reduction='mean', loss_weight=20.0),
        loss_dice=dict(
            type='DiceLoss', use_sigmoid=True, activate=True, reduction='mean',
            naive_dice=True, eps=1.0, loss_weight=1.0)),
    panoptic_fusion_head=dict(
        type='MaskFormerFusionHead',
        num_things_classes=num_things_classes,
        num_stuff_classes=num_stuff_classes,
        loss_panoptic=None,
        init_cfg=None),
    train_cfg=dict(
        assigner=dict(
            type='MaskHungarianAssigner',
            cls_cost=dict(type='ClassificationCost', weight=1.0),
            mask_cost=dict(type='FocalLossCost', weight=20.0,
                           binary_input=True),
            dice_cost=dict(type='DiceCost', weight=1.0, pred_act=True,
                           eps=1.0)),
        sampler=dict(type='MaskPseudoSampler')),
    test_cfg=dict(
        panoptic_on=True,
        # For now, the dataset does not support
        # evaluating semantic segmentation metric.
        semantic_on=False,
        instance_on=False,
        # max_per_image is for instance segmentation.
        max_per_image=100,
        object_mask_thr=0.8,
        iou_thr=0.8,
        # In MaskFormer's panoptic postprocessing,
        # it will not filter masks whose score is smaller than 0.5.
        filter_low_score=False),
    init_cfg=None)
# dataset settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadPanopticAnnotations', with_bbox=True, with_mask=True,
         with_seg=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='AutoAugment',
        policies=[[
            dict(
                type='Resize',
                img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
                           (608, 1333), (640, 1333), (672, 1333), (704, 1333),
                           (736, 1333), (768, 1333), (800, 1333)],
                multiscale_mode='value',
                keep_ratio=True)
        ], [
            dict(
                type='Resize',
                img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                multiscale_mode='value',
                keep_ratio=True),
            dict(
                type='RandomCrop',
                crop_type='absolute_range',
                crop_size=(384, 600),
                allow_negative_crop=True),
            dict(
                type='Resize',
                img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
                           (608, 1333), (640, 1333), (672, 1333), (704, 1333),
                           (736, 1333), (768, 1333), (800, 1333)],
                multiscale_mode='value',
                override=True,
                keep_ratio=True)
        ]]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=1),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect',
         keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=1),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=1,
    workers_per_gpu=1,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='AdamW', lr=0.0001, weight_decay=0.0001, eps=1e-8, betas=(0.9, 0.999),
    paramwise_cfg=dict(
        custom_keys={
            'backbone': dict(lr_mult=0.1, decay_mult=1.0),
            'query_embed': dict(lr_mult=1.0, decay_mult=0.0)
        },
        norm_decay_mult=0.0))
optimizer_config = dict(grad_clip=dict(max_norm=0.01, norm_type=2))
# learning policy
lr_config = dict(
    policy='step', gamma=0.1, by_epoch=True, step=[50],
    warmup='linear', warmup_by_epoch=False,
    warmup_ratio=1.0,  # no warmup
    warmup_iters=10)
runner = dict(type='EpochBasedRunner', max_epochs=75)

================================================
FILE: DLTA_AI_app/mmdetection/configs/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py
================================================
_base_ = './maskformer_r50_mstrain_16x1_75e_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth'  # noqa
depths = [2, 2, 18, 2]
model = dict(
    backbone=dict(
        _delete_=True,
        type='SwinTransformer',
        pretrain_img_size=384, embed_dims=192, patch_size=4, window_size=12,
        mlp_ratio=4, depths=depths, num_heads=[6, 12, 24, 48],
        qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
        drop_path_rate=0.3, patch_norm=True, out_indices=(0, 1, 2, 3),
        with_cp=False, convert_weights=True,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    panoptic_head=dict(
        in_channels=[192, 384, 768, 1536],  # pass to pixel_decoder inside
        pixel_decoder=dict(
            _delete_=True,
            type='PixelDecoder',
            norm_cfg=dict(type='GN', num_groups=32),
            act_cfg=dict(type='ReLU')),
        enforce_decoder_input_project=True))
# weight_decay = 0.01
# norm_weight_decay = 0.0
# embed_weight_decay = 0.0
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
norm_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
    'norm': norm_multi,
    'absolute_pos_embed': embed_multi,
    'relative_position_bias_table': embed_multi,
    'query_embed': embed_multi
}
# optimizer
optimizer = dict(
    type='AdamW', lr=6e-5, weight_decay=0.01, eps=1e-8, betas=(0.9, 0.999),
    paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
optimizer_config = dict(grad_clip=dict(max_norm=0.01, norm_type=2))
# learning policy
lr_config = dict(
    policy='step', gamma=0.1, by_epoch=True, step=[250],
    warmup='linear', warmup_by_epoch=False, warmup_ratio=1e-6,
    warmup_iters=1500)
runner = dict(type='EpochBasedRunner', max_epochs=300)

================================================
FILE: DLTA_AI_app/mmdetection/configs/maskformer/metafile.yml
================================================
Collections:
  - Name: MaskFormer
    Metadata:
      Training Data: COCO
      Training Techniques:
        - AdamW
        - Weight Decay
      Training Resources: 16x V100 GPUs
      Architecture:
        - MaskFormer
    Paper:
      URL: https://arxiv.org/pdf/2107.06278
      Title: 'Per-Pixel Classification is Not All You Need for Semantic Segmentation'
    README: configs/maskformer/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/mmdet/models/detectors/maskformer.py#L7
      Version: v2.22.0

Models:
  - Name: maskformer_r50_mstrain_16x1_75e_coco
    In Collection: MaskFormer
    Config: configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py
    Metadata:
      Training Memory (GB): 16.2
      Epochs: 75
    Results:
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 46.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_r50_mstrain_16x1_75e_coco/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth
  - Name: maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco
    In Collection: MaskFormer
    Config: configs/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py
    Metadata:
      Training Memory (GB): 27.2
      Epochs: 300
    Results:
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 53.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco_20220326_221612-061b4eb8.pth
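A config like the one above is normally consumed through the mmdet 2.x high-level API; a minimal sketch (the local checkpoint filename is assumed to have been downloaded from the metafile URL above, and 'demo.jpg' is a placeholder image):

# Sketch: build MaskFormer from this config and run one image (mmdet 2.x API).
from mmdet.apis import init_detector, inference_detector

config = 'configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py'
checkpoint = 'maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth'
model = init_detector(config, checkpoint, device='cuda:0')
result = inference_detector(model, 'demo.jpg')  # panoptic prediction for one image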
================================================
FILE: DLTA_AI_app/mmdetection/configs/ms_rcnn/metafile.yml
================================================
Collections:
  - Name: Mask Scoring R-CNN
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RPN
        - FPN
        - ResNet
        - RoIAlign
    Paper:
      URL: https://arxiv.org/abs/1903.00241
      Title: 'Mask Scoring R-CNN'
    README: configs/ms_rcnn/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/mask_scoring_rcnn.py#L6
      Version: v2.0.0

Models:
  - Name: ms_rcnn_r50_caffe_fpn_1x_coco
    In Collection: Mask Scoring R-CNN
    Config: configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.5
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth
  - Name: ms_rcnn_r50_caffe_fpn_2x_coco
    In Collection: Mask Scoring R-CNN
    Config: configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco/ms_rcnn_r50_caffe_fpn_2x_coco_bbox_mAP-0.388__segm_mAP-0.363_20200506_004738-ee87b137.pth
  - Name: ms_rcnn_r101_caffe_fpn_1x_coco
    In Collection: Mask Scoring R-CNN
    Config: configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 6.5
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.4
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco/ms_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.404__segm_mAP-0.376_20200506_004755-b9b12a37.pth
  - Name: ms_rcnn_r101_caffe_fpn_2x_coco
    In Collection: Mask Scoring R-CNN
    Config: configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco/ms_rcnn_r101_caffe_fpn_2x_coco_bbox_mAP-0.411__segm_mAP-0.381_20200506_011134-5f3cc74f.pth
  - Name: ms_rcnn_x101_32x4d_fpn_1x_coco
    In Collection: Mask Scoring R-CNN
    Config: configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 7.9
      inference time (ms/im):
        - value: 90.91
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco/ms_rcnn_x101_32x4d_fpn_1x_coco_20200206-81fd1740.pth
  - Name: ms_rcnn_x101_64x4d_fpn_1x_coco
    In Collection: Mask Scoring R-CNN
    Config: configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 11.0
      inference time (ms/im):
        - value: 125
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco/ms_rcnn_x101_64x4d_fpn_1x_coco_20200206-86ba88d2.pth
  - Name: ms_rcnn_x101_64x4d_fpn_2x_coco
    In Collection: Mask Scoring R-CNN
    Config: configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py
    Metadata:
      Training Memory (GB): 11.0
      inference time (ms/im):
        - value: 125
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.6
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco/ms_rcnn_x101_64x4d_fpn_2x_coco_20200308-02a445e2.pth

================================================
FILE: DLTA_AI_app/mmdetection/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py
================================================
_base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py
================================================
_base_ = './ms_rcnn_r101_caffe_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
    type='MaskScoringRCNN',
    roi_head=dict(
        type='MaskScoringRoIHead',
        mask_iou_head=dict(
            type='MaskIoUHead', num_convs=4, num_fcs=2, roi_feat_size=14,
            in_channels=256, conv_out_channels=256, fc_out_channels=1024,
            num_classes=80)),
    # model training and testing settings
    train_cfg=dict(rcnn=dict(mask_thr_binary=0.5)))

================================================
FILE: DLTA_AI_app/mmdetection/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py
================================================
_base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    type='MaskScoringRCNN',
    roi_head=dict(
        type='MaskScoringRoIHead',
        mask_iou_head=dict(
            type='MaskIoUHead', num_convs=4, num_fcs=2, roi_feat_size=14,
            in_channels=256, conv_out_channels=256, fc_out_channels=1024,
            num_classes=80)),
    # model training and testing settings
    train_cfg=dict(rcnn=dict(mask_thr_binary=0.5)))

================================================
FILE: DLTA_AI_app/mmdetection/configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py
================================================
_base_ = './ms_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4,
        out_indices=(0, 1, 2, 3), frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True), style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='open-mmlab://resnext101_32x4d')))
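These tiny configs rely entirely on `_base_` inheritance. A sketch (assuming the mmcv 1.x `Config` API) showing that only the backbone is overridden here while the Mask Scoring head is inherited from the base chain:

# Sketch: inspect the merged config produced by `_base_` inheritance.
from mmcv import Config

cfg = Config.fromfile('configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py')
print(cfg.model.backbone.type)  # 'ResNeXt'  (overridden in this file)
print(cfg.model.roi_head.type)  # 'MaskScoringRoIHead'  (inherited from the base)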
================================================
FILE: DLTA_AI_app/mmdetection/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py
================================================
_base_ = './ms_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4,
        out_indices=(0, 1, 2, 3), frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True), style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='open-mmlab://resnext101_64x4d')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py
================================================
_base_ = './ms_rcnn_x101_64x4d_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/nas_fcos/metafile.yml
================================================
Collections:
  - Name: NAS-FCOS
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 4x V100 GPUs
      Architecture:
        - FPN
        - NAS-FCOS
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/1906.04423
      Title: 'NAS-FCOS: Fast Neural Architecture Search for Object Detection'
    README: configs/nas_fcos/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/detectors/nasfcos.py#L6
      Version: v2.1.0

Models:
  - Name: nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco
    In Collection: NAS-FCOS
    Config: configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth
  - Name: nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco
    In Collection: NAS-FCOS
    Config: configs/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200521-7fdcbce0.pth

================================================
FILE: DLTA_AI_app/mmdetection/configs/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py
================================================
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='NASFCOS',
    backbone=dict(
        type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3),
        frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False, eps=0),
        style='caffe',
        init_cfg=dict(type='Pretrained',
                      checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    neck=dict(
        type='NASFCOS_FPN', in_channels=[256, 512, 1024, 2048],
        out_channels=256, start_level=1, add_extra_convs=True, num_outs=5,
        norm_cfg=dict(type='BN'),
        conv_cfg=dict(type='DCNv2', deform_groups=2)),
    bbox_head=dict(
        type='FCOSHead', num_classes=80, in_channels=256, stacked_convs=4,
        feat_channels=256, strides=[8, 16, 32, 64, 128],
        norm_cfg=dict(type='GN', num_groups=32),
        loss_cls=dict(type='FocalLoss', use_sigmoid=True, gamma=2.0,
                      alpha=0.25, loss_weight=1.0),
        loss_bbox=dict(type='IoULoss', loss_weight=1.0),
        loss_centerness=dict(type='CrossEntropyLoss', use_sigmoid=True,
                             loss_weight=1.0)),
    train_cfg=dict(
        assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4,
                      min_pos_iou=0, ignore_iof_thr=-1),
        allowed_border=-1, pos_weight=-1, debug=False),
    test_cfg=dict(
        nms_pre=1000, min_bbox_size=0, score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6), max_per_img=100))
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=2,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(
    lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
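Note the caffe-style normalization in these NAS-FCOS files (BGR input, mean subtraction only) versus the pytorch-style used elsewhere in this repo; a small numpy sketch contrasting the two conventions with the exact constants from these configs:

# Sketch: caffe-style vs pytorch-style image normalization, as used in these configs.
import numpy as np

img = np.random.randint(0, 256, (800, 1333, 3)).astype(np.float32)  # BGR, cv2 order

# caffe-style (this file): subtract per-channel BGR means, std stays 1, stay in BGR
caffe = img - np.array([103.530, 116.280, 123.675])

# pytorch-style (e.g. the NAS-FPN configs below): convert to RGB and standardize
rgb = img[..., ::-1]
pytorch = (rgb - np.array([123.675, 116.28, 103.53])) / np.array([58.395, 57.12, 57.375])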
================================================
FILE: DLTA_AI_app/mmdetection/configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py
================================================
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='NASFCOS',
    backbone=dict(
        type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3),
        frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False, eps=0),
        style='caffe',
        init_cfg=dict(type='Pretrained',
                      checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    neck=dict(
        type='NASFCOS_FPN', in_channels=[256, 512, 1024, 2048],
        out_channels=256, start_level=1, add_extra_convs=True, num_outs=5,
        norm_cfg=dict(type='BN'),
        conv_cfg=dict(type='DCNv2', deform_groups=2)),
    bbox_head=dict(
        type='NASFCOSHead', num_classes=80, in_channels=256,
        feat_channels=256, strides=[8, 16, 32, 64, 128],
        norm_cfg=dict(type='GN', num_groups=32),
        loss_cls=dict(type='FocalLoss', use_sigmoid=True, gamma=2.0,
                      alpha=0.25, loss_weight=1.0),
        loss_bbox=dict(type='IoULoss', loss_weight=1.0),
        loss_centerness=dict(type='CrossEntropyLoss', use_sigmoid=True,
                             loss_weight=1.0)),
    train_cfg=dict(
        assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4,
                      min_pos_iou=0, ignore_iof_thr=-1),
        allowed_border=-1, pos_weight=-1, debug=False),
    test_cfg=dict(
        nms_pre=1000, min_bbox_size=0, score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6), max_per_img=100))
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=2,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(
    lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))

================================================
FILE: DLTA_AI_app/mmdetection/configs/nas_fpn/metafile.yml
================================================
Collections:
  - Name: NAS-FPN
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - NAS-FPN
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/1904.07392
      Title: 'NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection'
    README: configs/nas_fpn/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/necks/nas_fpn.py#L67
      Version: v2.0.0

Models:
  - Name: retinanet_r50_fpn_crop640_50e_coco
    In Collection: NAS-FPN
    Config: configs/nas_fpn/retinanet_r50_fpn_crop640_50e_coco.py
    Metadata:
      Training Memory (GB): 12.9
      inference time (ms/im):
        - value: 43.67
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 50
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_fpn_crop640_50e_coco/retinanet_r50_fpn_crop640_50e_coco-9b953d76.pth
  - Name: retinanet_r50_nasfpn_crop640_50e_coco
    In Collection: NAS-FPN
    Config: configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py
    Metadata:
      Training Memory (GB): 13.2
      inference time (ms/im):
        - value: 43.48
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 50
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth

================================================
FILE: DLTA_AI_app/mmdetection/configs/nas_fpn/retinanet_r50_fpn_crop640_50e_coco.py
================================================
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    backbone=dict(
        type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3),
        frozen_stages=1, norm_cfg=norm_cfg, norm_eval=False, style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        relu_before_extra_convs=True,
        no_norm_on_lateral=True,
        norm_cfg=norm_cfg),
    bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
    # training and testing settings
    train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(640, 640), ratio_range=(0.8, 1.2),
         keep_ratio=True),
    dict(type='RandomCrop', crop_size=(640, 640)),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=(640, 640)),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(640, 640),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=64),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001,
    paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step', warmup='linear', warmup_iters=1000, warmup_ratio=0.1,
    step=[30, 40])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=50)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
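The `auto_scale_lr` block encodes the linear LR scaling rule; a sketch of the scaling mmdetection applies when the feature is enabled (the helper function is illustrative, not part of the library):

# Sketch: linear LR scaling implied by auto_scale_lr(base_batch_size=64).
base_batch_size = 64  # 8 GPUs x 8 samples per GPU, as in the config above
base_lr = 0.08        # optimizer lr tuned for that batch size

def scaled_lr(num_gpus, samples_per_gpu):
    # lr scales proportionally with the actual total batch size
    return base_lr * (num_gpus * samples_per_gpu) / base_batch_size

print(scaled_lr(4, 8))  # 0.04 when training on half the GPUs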
================================================
FILE: DLTA_AI_app/mmdetection/configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py
================================================
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
# model settings
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    type='RetinaNet',
    backbone=dict(
        type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3),
        frozen_stages=1, norm_cfg=norm_cfg, norm_eval=False, style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(type='NASFPN', stack_times=7, norm_cfg=norm_cfg),
    bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
    # training and testing settings
    train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(640, 640), ratio_range=(0.8, 1.2),
         keep_ratio=True),
    dict(type='RandomCrop', crop_size=(640, 640)),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=(640, 640)),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(640, 640),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=128),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001,
    paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step', warmup='linear', warmup_iters=1000, warmup_ratio=0.1,
    step=[30, 40])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=50)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)

================================================
FILE: DLTA_AI_app/mmdetection/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages.py
================================================
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/openimages_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=601)))
# Using 32 GPUs while training
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
    policy='step', warmup='linear', warmup_iters=26000,
    warmup_ratio=1.0 / 64, step=[8, 11])
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)

================================================
FILE: DLTA_AI_app/mmdetection/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py
================================================
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py']
model = dict(
    roi_head=dict(bbox_head=dict(num_classes=500)),
    test_cfg=dict(rcnn=dict(score_thr=0.01)))
# dataset settings
dataset_type = 'OpenImagesChallengeDataset'
data_root = 'data/OpenImages/'
data = dict(
    train=dict(
        type=dataset_type,
        ann_file=data_root +
        'challenge2019/challenge-2019-train-detection-bbox.txt',
        img_prefix=data_root + 'OpenImages/',
        label_file=data_root + 'challenge2019/cls-label-description.csv',
        hierarchy_file=data_root + 'challenge2019/class_label_tree.np'),
    val=dict(
        type=dataset_type,
        ann_file=data_root +
        'challenge2019/challenge-2019-validation-detection-bbox.txt',
        img_prefix=data_root + 'OpenImages/',
        label_file=data_root + 'challenge2019/cls-label-description.csv',
        hierarchy_file=data_root + 'challenge2019/class_label_tree.np',
        meta_file=data_root +
        'challenge2019/challenge-2019-validation-metas.pkl',
        image_level_ann_file=data_root +
        'challenge2019/challenge-2019-validation-detection-human-imagelabels.csv'),
    test=dict(
        type=dataset_type,
        ann_file=data_root +
        'challenge2019/challenge-2019-validation-detection-bbox.txt',
        img_prefix=data_root + 'OpenImages/',
        label_file=data_root + 'challenge2019/cls-label-description.csv',
        hierarchy_file=data_root + 'challenge2019/class_label_tree.np',
        meta_file=data_root +
        'challenge2019/challenge-2019-validation-metas.pkl',
        image_level_ann_file=data_root +
        'challenge2019/challenge-2019-validation-detection-human-imagelabels.csv'))
evaluation = dict(interval=1, metric='mAP')
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)

================================================
FILE: DLTA_AI_app/mmdetection/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py
================================================
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py']
# Use ClassAwareSampler
data = dict(
    train_dataloader=dict(class_aware_sampler=dict(num_sample_class=1)))

================================================
FILE: DLTA_AI_app/mmdetection/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py
================================================
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py']
# Use ClassAwareSampler
data = dict(
    train_dataloader=dict(class_aware_sampler=dict(num_sample_class=1)))
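The two `cas` variants above differ from their parents by a single dataloader key. The same override can be applied programmatically; a sketch, assuming the mmcv 1.x `Config` API:

# Sketch: enable ClassAwareSampler on top of the plain Open Images config.
from mmcv import Config

cfg = Config.fromfile('configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages.py')
# num_sample_class = how many images are drawn per sampled class
cfg.data.train_dataloader = dict(class_aware_sampler=dict(num_sample_class=1))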
================================================
FILE: DLTA_AI_app/mmdetection/configs/openimages/metafile.yml
================================================
Models:
  - Name: faster_rcnn_r50_fpn_32x2_1x_openimages
    In Collection: Faster R-CNN
    Config: configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages.py
    Metadata:
      Training Memory (GB): 7.7
      Epochs: 12
      Training Data: Open Images v6
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
    Results:
      - Task: Object Detection
        Dataset: Open Images v6
        Metrics:
          box AP: 51.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_20211130_231159-e87ab7ce.pth
  - Name: retinanet_r50_fpn_32x2_1x_openimages
    In Collection: RetinaNet
    Config: configs/openimages/retinanet_r50_fpn_32x2_1x_openimages.py
    Metadata:
      Training Memory (GB): 6.6
      Epochs: 12
      Training Data: Open Images v6
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
    Results:
      - Task: Object Detection
        Dataset: Open Images v6
        Metrics:
          box AP: 61.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/retinanet_r50_fpn_32x2_1x_openimages/retinanet_r50_fpn_32x2_1x_openimages_20211223_071954-d2ae5462.pth
  - Name: ssd300_32x8_36e_openimages
    In Collection: SSD
    Config: configs/openimages/ssd300_32x8_36e_openimages.py
    Metadata:
      Training Memory (GB): 10.8
      Epochs: 36
      Training Data: Open Images v6
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
    Results:
      - Task: Object Detection
        Dataset: Open Images v6
        Metrics:
          box AP: 35.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/ssd300_32x8_36e_openimages/ssd300_32x8_36e_openimages_20211224_000232-dce93846.pth
  - Name: faster_rcnn_r50_fpn_32x2_1x_openimages_challenge
    In Collection: Faster R-CNN
    Config: configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py
    Metadata:
      Training Memory (GB): 7.7
      Epochs: 12
      Training Data: Open Images Challenge 2019
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
    Results:
      - Task: Object Detection
        Dataset: Open Images Challenge 2019
        Metrics:
          box AP: 54.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge_20220114_045100-0e79e5df.pth
  - Name: faster_rcnn_r50_fpn_32x2_cas_1x_openimages
    In Collection: Faster R-CNN
    Config: configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py
    Metadata:
      Training Memory (GB): 7.7
      Epochs: 12
      Training Data: Open Images Challenge 2019
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
    Results:
      - Task: Object Detection
        Dataset: Open Images Challenge 2019
        Metrics:
          box AP: 60.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_20220306_202424-98c630e5.pth
  - Name: faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge
    In Collection: Faster R-CNN
    Config: configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py
    Metadata:
      Training Memory (GB): 7.1
      Epochs: 12
      Training Data: Open Images Challenge 2019
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
    Results:
      - Task: Object Detection
        Dataset: Open Images Challenge 2019
        Metrics:
          box AP: 65.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge_20220221_192021-34c402d9.pth

================================================
FILE: DLTA_AI_app/mmdetection/configs/openimages/retinanet_r50_fpn_32x2_1x_openimages.py
================================================
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/openimages_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=601))
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
    policy='step', warmup='linear', warmup_iters=26000,
    warmup_ratio=1.0 / 64, step=[8, 11])
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)

================================================
FILE: DLTA_AI_app/mmdetection/configs/openimages/ssd300_32x8_36e_openimages.py
================================================
_base_ = [
    '../_base_/models/ssd300.py',
    '../_base_/datasets/openimages_detection.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
model = dict(
    bbox_head=dict(
        num_classes=601,
        anchor_generator=dict(basesize_ratio_range=(0.2, 0.9))))
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True, normed_bbox=True),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    dict(
        type='Expand',
        mean=img_norm_cfg['mean'],
        to_rgb=img_norm_cfg['to_rgb'],
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(300, 300),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=8,  # using 32 GPUs while training
    workers_per_gpu=0,  # workers_per_gpu > 0 may cause out-of-memory errors
    train=dict(
        _delete_=True,
        type='RepeatDataset',
        times=3,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/oidv6-train-annotations-bbox.csv',
            img_prefix=data_root + 'OpenImages/train/',
            label_file=data_root + 'annotations/class-descriptions-boxable.csv',
            hierarchy_file=data_root +
            'annotations/bbox_labels_600_hierarchy.json',
            pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.04, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict()
# learning policy
lr_config = dict(
    policy='step', warmup='linear', warmup_iters=20000, warmup_ratio=0.001,
    step=[8, 11])
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=256)
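The "36e" in the file name above comes from `RepeatDataset`, not from the runner; a sketch of the bookkeeping:

# Sketch: why this config is "36e" although the runner trains 12 epochs.
max_epochs = 12    # inherited from ../_base_/schedules/schedule_1x.py
repeat_times = 3   # RepeatDataset(times=3) in the config above
print(max_epochs * repeat_times)  # 36 passes over the real data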
================================================
FILE: DLTA_AI_app/mmdetection/configs/paa/metafile.yml
================================================
Collections:
  - Name: PAA
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - FPN
        - Probabilistic Anchor Assignment
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/2007.08103
      Title: 'Probabilistic Anchor Assignment with IoU Prediction for Object Detection'
    README: configs/paa/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.4.0/mmdet/models/detectors/paa.py#L6
      Version: v2.4.0

Models:
  - Name: paa_r50_fpn_1x_coco
    In Collection: PAA
    Config: configs/paa/paa_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 3.7
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth
  - Name: paa_r50_fpn_1.5x_coco
    In Collection: PAA
    Config: configs/paa/paa_r50_fpn_1.5x_coco.py
    Metadata:
      Training Memory (GB): 3.7
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1.5x_coco/paa_r50_fpn_1.5x_coco_20200823-805d6078.pth
  - Name: paa_r50_fpn_2x_coco
    In Collection: PAA
    Config: configs/paa/paa_r50_fpn_2x_coco.py
    Metadata:
      Training Memory (GB): 3.7
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_2x_coco/paa_r50_fpn_2x_coco_20200821-c98bfc4e.pth
  - Name: paa_r50_fpn_mstrain_3x_coco
    In Collection: PAA
    Config: configs/paa/paa_r50_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 3.7
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_mstrain_3x_coco/paa_r50_fpn_mstrain_3x_coco_20210121_145722-06a6880b.pth
  - Name: paa_r101_fpn_1x_coco
    In Collection: PAA
    Config: configs/paa/paa_r101_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 6.2
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth
  - Name: paa_r101_fpn_2x_coco
    In Collection: PAA
    Config: configs/paa/paa_r101_fpn_2x_coco.py
    Metadata:
      Training Memory (GB): 6.2
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_2x_coco/paa_r101_fpn_2x_coco_20200821-6829f96b.pth
  - Name: paa_r101_fpn_mstrain_3x_coco
    In Collection: PAA
    Config: configs/paa/paa_r101_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 6.2
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 45.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_mstrain_3x_coco/paa_r101_fpn_mstrain_3x_coco_20210122_084202-83250d22.pth

================================================
FILE: DLTA_AI_app/mmdetection/configs/paa/paa_r101_fpn_1x_coco.py
================================================
_base_ = './paa_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/paa/paa_r101_fpn_2x_coco.py
================================================
_base_ = './paa_r101_fpn_1x_coco.py'
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/paa/paa_r101_fpn_mstrain_3x_coco.py
================================================
_base_ = './paa_r50_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/paa/paa_r50_fpn_1.5x_coco.py
================================================
_base_ = './paa_r50_fpn_1x_coco.py'
lr_config = dict(step=[12, 16])
runner = dict(type='EpochBasedRunner', max_epochs=18)

================================================
FILE: DLTA_AI_app/mmdetection/configs/paa/paa_r50_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='PAA',
    backbone=dict(
        type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3),
        frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True, style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256,
        start_level=1, add_extra_convs='on_output', num_outs=5),
    bbox_head=dict(
        type='PAAHead', reg_decoded_bbox=True, score_voting=True, topk=9,
        num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator', ratios=[1.0], octave_base_scale=8,
            scales_per_octave=1, strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(type='FocalLoss', use_sigmoid=True, gamma=2.0,
                      alpha=0.25, loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
        loss_centerness=dict(type='CrossEntropyLoss', use_sigmoid=True,
                             loss_weight=0.5)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.1, neg_iou_thr=0.1,
                      min_pos_iou=0, ignore_iof_thr=-1),
        allowed_border=-1, pos_weight=-1, debug=False),
    test_cfg=dict(
        nms_pre=1000, min_bbox_size=0, score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6), max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)

================================================
FILE: DLTA_AI_app/mmdetection/configs/paa/paa_r50_fpn_2x_coco.py
================================================
_base_ = './paa_r50_fpn_1x_coco.py'
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/paa/paa_r50_fpn_mstrain_3x_coco.py
================================================
_base_ = './paa_r50_fpn_1x_coco.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=[(1333, 640), (1333, 800)],
         multiscale_mode='range', keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)

================================================
FILE: DLTA_AI_app/mmdetection/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py
================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    neck=dict(
        type='PAFPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5))

================================================
FILE: DLTA_AI_app/mmdetection/configs/pafpn/metafile.yml
================================================
Collections:
  - Name: PAFPN
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - PAFPN
    Paper:
      URL: https://arxiv.org/abs/1803.01534
      Title: 'Path Aggregation Network for Instance Segmentation'
    README: configs/pafpn/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/necks/pafpn.py#L11
      Version: v2.0.0

Models:
  - Name: faster_rcnn_r50_pafpn_1x_coco
    In Collection: PAFPN
    Config: configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.0
      inference time (ms/im):
        - value: 58.14
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth
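The PAFPN file above shows the usual pattern of swapping a single component on top of a base detector; the same swap expressed as an inline override (a sketch, assuming the mmcv 1.x `Config` API):

# Sketch: replace FPN with PAFPN on the Faster R-CNN base config.
from mmcv import Config

cfg = Config.fromfile('configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py')
cfg.model.neck = dict(
    type='PAFPN', in_channels=[256, 512, 1024, 2048],
    out_channels=256, num_outs=5)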
================================================
FILE: DLTA_AI_app/mmdetection/configs/panoptic_fpn/metafile.yml
================================================
Collections:
  - Name: PanopticFPN
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - PanopticFPN
    Paper:
      URL: https://arxiv.org/pdf/1901.02446
      Title: 'Panoptic feature pyramid networks'
    README: configs/panoptic_fpn/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/detectors/panoptic_fpn.py#L7
      Version: v2.16.0

Models:
  - Name: panoptic_fpn_r50_fpn_1x_coco
    In Collection: PanopticFPN
    Config: configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.6
      Epochs: 12
    Results:
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 40.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco/panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth
  - Name: panoptic_fpn_r50_fpn_mstrain_3x_coco
    In Collection: PanopticFPN
    Config: configs/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 4.6
      Epochs: 36
    Results:
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 42.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco/panoptic_fpn_r50_fpn_mstrain_3x_coco_20210824_171155-5650f98b.pth
  - Name: panoptic_fpn_r101_fpn_1x_coco
    In Collection: PanopticFPN
    Config: configs/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 6.5
      Epochs: 12
    Results:
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 42.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth
  - Name: panoptic_fpn_r101_fpn_mstrain_3x_coco
    In Collection: PanopticFPN
    Config: configs/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 6.5
      Epochs: 36
    Results:
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 44.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco/panoptic_fpn_r101_fpn_mstrain_3x_coco_20210823_114712-9c99acc4.pth

================================================
FILE: DLTA_AI_app/mmdetection/configs/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco.py
================================================
_base_ = './panoptic_fpn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco.py
================================================
_base_ = './panoptic_fpn_r50_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_panoptic.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='PanopticFPN',
    semantic_head=dict(
        type='PanopticFPNHead', num_things_classes=80, num_stuff_classes=53,
        in_channels=256, inner_channels=128, start_level=0, end_level=4,
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
        conv_cfg=None,
        loss_seg=dict(type='CrossEntropyLoss', ignore_index=255,
                      loss_weight=0.5)),
    panoptic_fusion_head=dict(
        type='HeuristicFusionHead',
        num_things_classes=80,
        num_stuff_classes=53),
    test_cfg=dict(
        panoptic=dict(
            score_thr=0.6, max_per_img=100, mask_thr_binary=0.5,
            mask_overlap=0.5,
            nms=dict(type='nms', iou_threshold=0.5, class_agnostic=True),
            stuff_area_limit=4096)))
custom_hooks = []

================================================
FILE: DLTA_AI_app/mmdetection/configs/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco.py
================================================
_base_ = './panoptic_fpn_r50_fpn_1x_coco.py'
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadPanopticAnnotations', with_bbox=True, with_mask=True,
         with_seg=True),
    dict(type='Resize', img_scale=[(1333, 640), (1333, 800)],
         multiscale_mode='range', keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='SegRescale', scale_factor=1 / 4),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect',
         keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# Use RepeatDataset to speed up training
data = dict(
    train=dict(
        _delete_=True,
        type='RepeatDataset',
        times=3,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/panoptic_train2017.json',
            img_prefix=data_root + 'train2017/',
            seg_prefix=data_root + 'annotations/panoptic_train2017/',
            pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))

================================================
FILE: DLTA_AI_app/mmdetection/configs/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712.py
================================================
_base_ = [
    '../_base_/models/faster_rcnn_r50_caffe_c4.py',
    '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize',
         img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),
                    (1333, 608), (1333, 640), (1333, 672), (1333, 704),
                    (1333, 736), (1333, 768), (1333, 800)],
         multiscale_mode='value', keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=[
            data_root + 'VOC2007/ImageSets/Main/trainval.txt',
            data_root + 'VOC2012/ImageSets/Main/trainval.txt'
        ],
        img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'],
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step', warmup='linear', warmup_iters=100, warmup_ratio=0.001,
    step=[12000, 16000])
# Runner type
runner = dict(type='IterBasedRunner', max_iters=18000)
checkpoint_config = dict(interval=3000)
evaluation = dict(interval=3000, metric='mAP')
'../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', '../_base_/default_runtime.py' ] model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=None) # learning policy # actual epoch = 3 * 3 = 9 lr_config = dict(policy='step', step=[3]) # runtime settings runner = dict( type='EpochBasedRunner', max_epochs=4) # actual epoch = 4 * 3 = 12 ================================================ FILE: DLTA_AI_app/mmdetection/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712_cocofmt.py ================================================ _base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', '../_base_/default_runtime.py' ] model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') # dataset settings dataset_type = 'CocoDataset' data_root = 'data/VOCdevkit/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1000, 600), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1000, 600), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type='RepeatDataset', times=3, dataset=dict( type=dataset_type, ann_file='data/voc0712_trainval.json', img_prefix='data/VOCdevkit', pipeline=train_pipeline, classes=CLASSES)), val=dict( type=dataset_type, ann_file='data/voc07_test.json', img_prefix='data/VOCdevkit', pipeline=test_pipeline, classes=CLASSES), test=dict( type=dataset_type, ann_file='data/voc07_test.json', img_prefix='data/VOCdevkit', pipeline=test_pipeline, classes=CLASSES)) evaluation = dict(interval=1, metric='bbox') # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=None) # learning policy # actual epoch = 3 * 3 = 9 lr_config = dict(policy='step', step=[3]) # runtime settings runner = dict( type='EpochBasedRunner', max_epochs=4) # actual epoch = 4 * 3 = 12 ================================================ FILE: DLTA_AI_app/mmdetection/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py ================================================ _base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/voc0712.py', '../_base_/default_runtime.py' ] model = dict(bbox_head=dict(num_classes=20)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=None) # learning policy # actual epoch = 3 * 3 = 9 lr_config = dict(policy='step', step=[3]) # runtime settings runner = dict( type='EpochBasedRunner', max_epochs=4) # actual epoch = 4 * 3 = 12 ================================================ FILE: 
DLTA_AI_app/mmdetection/configs/pascal_voc/ssd300_voc0712.py ================================================ _base_ = [ '../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py', '../_base_/default_runtime.py' ] model = dict( bbox_head=dict( num_classes=20, anchor_generator=dict(basesize_ratio_range=(0.2, 0.9)))) # dataset settings dataset_type = 'VOCDataset' data_root = 'data/VOCdevkit/' img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict( type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(300, 300), keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(300, 300), flip=False, transforms=[ dict(type='Resize', keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=3, train=dict( type='RepeatDataset', times=10, dataset=dict(pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4) optimizer_config = dict() # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, step=[16, 20]) checkpoint_config = dict(interval=1) # runtime settings runner = dict(type='EpochBasedRunner', max_epochs=24) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. 
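# (Sketch of the mechanism, assuming mmdet 2.x behaviour rather than anything
# stated in this file: when training is launched with the --auto-scale-lr
# flag, the configured lr is multiplied by
# actual total batch size / base_batch_size, i.e. the linear scaling rule, so
# base_batch_size below simply records the batch size this lr was tuned for.)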
# base_batch_size = (8 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=64) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pascal_voc/ssd512_voc0712.py ================================================ _base_ = 'ssd300_voc0712.py' input_size = 512 model = dict( neck=dict( out_channels=(512, 1024, 512, 256, 256, 256, 256), level_strides=(2, 2, 2, 2, 1), level_paddings=(1, 1, 1, 1, 1), last_kernel_size=4), bbox_head=dict( in_channels=(512, 1024, 512, 256, 256, 256, 256), anchor_generator=dict( input_size=input_size, strides=[8, 16, 32, 64, 128, 256, 512], basesize_ratio_range=(0.15, 0.9), ratios=([2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2])))) img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict( type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(512, 512), keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(512, 512), flip=False, transforms=[ dict(type='Resize', keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(dataset=dict(pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pisa/metafile.yml ================================================ Collections: - Name: PISA Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - FPN - PISA - RPN - ResNet - RoIPool Paper: URL: https://arxiv.org/abs/1904.04821 Title: 'Prime Sample Attention in Object Detection' README: configs/pisa/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/roi_heads/pisa_roi_head.py#L8 Version: v2.1.0 Models: - Name: pisa_faster_rcnn_r50_fpn_1x_coco In Collection: PISA Config: configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 38.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth - Name: pisa_faster_rcnn_x101_32x4d_fpn_1x_coco In Collection: PISA Config: configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco-e4accec4.pth - Name: pisa_mask_rcnn_r50_fpn_1x_coco In Collection: PISA Config: configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 39.1 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 35.2 Weights: 
https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco-dfcedba6.pth - Name: pisa_retinanet_r50_fpn_1x_coco In Collection: PISA Config: configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 36.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco-76409952.pth - Name: pisa_retinanet_x101_32x4d_fpn_1x_coco In Collection: PISA Config: configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco-a0c13c73.pth - Name: pisa_ssd300_coco In Collection: PISA Config: configs/pisa/pisa_ssd300_coco.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 27.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco-710e3ac9.pth - Name: pisa_ssd512_coco In Collection: PISA Config: configs/pisa/pisa_ssd512_coco.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 31.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco-247addee.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py ================================================ _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( roi_head=dict( type='PISARoIHead', bbox_head=dict( loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), train_cfg=dict( rpn_proposal=dict( nms_pre=2000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( sampler=dict( type='ScoreHLRSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True, k=0.5, bias=0.), isr=dict(k=2, bias=0), carl=dict(k=1, bias=0.2))), test_cfg=dict( rpn=dict( nms_pre=2000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py ================================================ _base_ = '../faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py' model = dict( roi_head=dict( type='PISARoIHead', bbox_head=dict( loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), train_cfg=dict( rpn_proposal=dict( nms_pre=2000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( sampler=dict( type='ScoreHLRSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True, k=0.5, bias=0.), isr=dict(k=2, bias=0), carl=dict(k=1, bias=0.2))), test_cfg=dict( rpn=dict( nms_pre=2000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py ================================================ _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' model = dict( roi_head=dict( type='PISARoIHead', bbox_head=dict( loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), train_cfg=dict( rpn_proposal=dict( nms_pre=2000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( sampler=dict( type='ScoreHLRSampler', 
num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True, k=0.5, bias=0.), isr=dict(k=2, bias=0), carl=dict(k=1, bias=0.2))), test_cfg=dict( rpn=dict( nms_pre=2000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pisa/pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py ================================================ _base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py' model = dict( roi_head=dict( type='PISARoIHead', bbox_head=dict( loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), train_cfg=dict( rpn_proposal=dict( nms_pre=2000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0), rcnn=dict( sampler=dict( type='ScoreHLRSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True, k=0.5, bias=0.), isr=dict(k=2, bias=0), carl=dict(k=1, bias=0.2))), test_cfg=dict( rpn=dict( nms_pre=2000, max_per_img=2000, nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py ================================================ _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' model = dict( bbox_head=dict( type='PISARetinaHead', loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)), train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py ================================================ _base_ = '../retinanet/retinanet_x101_32x4d_fpn_1x_coco.py' model = dict( bbox_head=dict( type='PISARetinaHead', loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)), train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pisa/pisa_ssd300_coco.py ================================================ _base_ = '../ssd/ssd300_coco.py' model = dict( bbox_head=dict(type='PISASSDHead'), train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pisa/pisa_ssd512_coco.py ================================================ _base_ = '../ssd/ssd512_coco.py' model = dict( bbox_head=dict(type='PISASSDHead'), train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/point_rend/metafile.yml ================================================ Collections: - Name: PointRend Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - PointRend - FPN - ResNet Paper: URL: https://arxiv.org/abs/1912.08193 Title: 'PointRend: Image Segmentation as Rendering' README: configs/point_rend/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/detectors/point_rend.py#L6 Version: v2.2.0 Models: - Name: point_rend_r50_caffe_fpn_mstrain_1x_coco In Collection: PointRend Config: configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py Metadata: Training Memory (GB): 4.6 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box 
AP: 38.4 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 36.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth - Name: point_rend_r50_caffe_fpn_mstrain_3x_coco In Collection: PointRend Config: configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py Metadata: Training Memory (GB): 4.6 Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.0 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 38.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco/point_rend_r50_caffe_fpn_mstrain_3x_coco-e0ebb6b7.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py ================================================ _base_ = '../mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' # model settings model = dict( type='PointRend', roi_head=dict( type='PointRendRoIHead', mask_roi_extractor=dict( type='GenericRoIExtractor', aggregation='concat', roi_layer=dict( _delete_=True, type='SimpleRoIAlign', output_size=14), out_channels=256, featmap_strides=[4]), mask_head=dict( _delete_=True, type='CoarseMaskHead', num_fcs=2, in_channels=256, conv_out_channels=256, fc_out_channels=1024, num_classes=80, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), point_head=dict( type='MaskPointHead', num_fcs=3, in_channels=256, fc_channels=256, num_classes=80, coarse_pred_each_layer=True, loss_point=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), # model training and testing settings train_cfg=dict( rcnn=dict( mask_size=7, num_points=14 * 14, oversample_ratio=3, importance_sample_ratio=0.75)), test_cfg=dict( rcnn=dict( subdivision_steps=5, subdivision_num_points=28 * 28, scale_factor=2))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py ================================================ _base_ = './point_rend_r50_caffe_fpn_mstrain_1x_coco.py' # learning policy lr_config = dict(step=[28, 34]) runner = dict(type='EpochBasedRunner', max_epochs=36) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pvt/metafile.yml ================================================ Models: - Name: retinanet_pvt-t_fpn_1x_coco In Collection: RetinaNet Config: configs/pvt/retinanet_pvt-t_fpn_1x_coco.py Metadata: Training Memory (GB): 8.5 Epochs: 12 Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x NVIDIA V100 GPUs Architecture: - PyramidVisionTransformer Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 36.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-t_fpn_1x_coco/retinanet_pvt-t_fpn_1x_coco_20210831_103110-17b566bd.pth Paper: URL: https://arxiv.org/abs/2102.12122 Title: "Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions" README: configs/pvt/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L315 Version: 2.17.0 - Name: retinanet_pvt-s_fpn_1x_coco In Collection: RetinaNet Config: configs/pvt/retinanet_pvt-s_fpn_1x_coco.py Metadata: Training Memory (GB): 14.5 Epochs: 12 Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x NVIDIA V100 GPUs 
Architecture: - PyramidVisionTransformer Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth Paper: URL: https://arxiv.org/abs/2102.12122 Title: "Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions" README: configs/pvt/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L315 Version: 2.17.0 - Name: retinanet_pvt-m_fpn_1x_coco In Collection: RetinaNet Config: configs/pvt/retinanet_pvt-m_fpn_1x_coco.py Metadata: Training Memory (GB): 20.9 Epochs: 12 Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x NVIDIA V100 GPUs Architecture: - PyramidVisionTransformer Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-m_fpn_1x_coco/retinanet_pvt-m_fpn_1x_coco_20210831_103243-55effa1b.pth Paper: URL: https://arxiv.org/abs/2102.12122 Title: "Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions" README: configs/pvt/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L315 Version: 2.17.0 - Name: retinanet_pvtv2-b0_fpn_1x_coco In Collection: RetinaNet Config: configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py Metadata: Training Memory (GB): 7.4 Epochs: 12 Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x NVIDIA V100 GPUs Architecture: - PyramidVisionTransformerV2 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 37.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b0_fpn_1x_coco/retinanet_pvtv2-b0_fpn_1x_coco_20210831_103157-13e9aabe.pth Paper: URL: https://arxiv.org/abs/2106.13797 Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" README: configs/pvt/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 Version: 2.17.0 - Name: retinanet_pvtv2-b1_fpn_1x_coco In Collection: RetinaNet Config: configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py Metadata: Training Memory (GB): 9.5 Epochs: 12 Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x NVIDIA V100 GPUs Architecture: - PyramidVisionTransformerV2 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b1_fpn_1x_coco/retinanet_pvtv2-b1_fpn_1x_coco_20210831_103318-7e169a7d.pth Paper: URL: https://arxiv.org/abs/2106.13797 Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" README: configs/pvt/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 Version: 2.17.0 - Name: retinanet_pvtv2-b2_fpn_1x_coco In Collection: RetinaNet Config: configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py Metadata: Training Memory (GB): 16.2 Epochs: 12 Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x NVIDIA V100 GPUs Architecture: - PyramidVisionTransformerV2 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 44.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b2_fpn_1x_coco/retinanet_pvtv2-b2_fpn_1x_coco_20210901_174843-529f0b9a.pth Paper: URL: 
https://arxiv.org/abs/2106.13797 Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" README: configs/pvt/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 Version: 2.17.0 - Name: retinanet_pvtv2-b3_fpn_1x_coco In Collection: RetinaNet Config: configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py Metadata: Training Memory (GB): 23.0 Epochs: 12 Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x NVIDIA V100 GPUs Architecture: - PyramidVisionTransformerV2 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b3_fpn_1x_coco/retinanet_pvtv2-b3_fpn_1x_coco_20210903_151512-8357deff.pth Paper: URL: https://arxiv.org/abs/2106.13797 Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" README: configs/pvt/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 Version: 2.17.0 - Name: retinanet_pvtv2-b4_fpn_1x_coco In Collection: RetinaNet Config: configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py Metadata: Training Memory (GB): 17.0 Epochs: 12 Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x NVIDIA V100 GPUs Architecture: - PyramidVisionTransformerV2 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b4_fpn_1x_coco/retinanet_pvtv2-b4_fpn_1x_coco_20210901_170151-83795c86.pth Paper: URL: https://arxiv.org/abs/2106.13797 Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" README: configs/pvt/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 Version: 2.17.0 - Name: retinanet_pvtv2-b5_fpn_1x_coco In Collection: RetinaNet Config: configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py Metadata: Training Memory (GB): 18.7 Epochs: 12 Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x NVIDIA V100 GPUs Architecture: - PyramidVisionTransformerV2 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b5_fpn_1x_coco/retinanet_pvtv2-b5_fpn_1x_coco_20210902_201800-3420eb57.pth Paper: URL: https://arxiv.org/abs/2106.13797 Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" README: configs/pvt/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 Version: 2.17.0 ================================================ FILE: DLTA_AI_app/mmdetection/configs/pvt/retinanet_pvt-l_fpn_1x_coco.py ================================================ _base_ = 'retinanet_pvt-t_fpn_1x_coco.py' model = dict( backbone=dict( num_layers=[3, 8, 27, 3], init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' 'releases/download/v2/pvt_large.pth'))) fp16 = dict(loss_scale=dict(init_scale=512)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pvt/retinanet_pvt-m_fpn_1x_coco.py ================================================ _base_ = 'retinanet_pvt-t_fpn_1x_coco.py' model = dict( backbone=dict( num_layers=[3, 4, 18, 3], init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' 'releases/download/v2/pvt_medium.pth'))) ================================================ FILE: 
DLTA_AI_app/mmdetection/configs/pvt/retinanet_pvt-s_fpn_1x_coco.py ================================================ _base_ = 'retinanet_pvt-t_fpn_1x_coco.py' model = dict( backbone=dict( num_layers=[3, 4, 6, 3], init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' 'releases/download/v2/pvt_small.pth'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pvt/retinanet_pvt-t_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='RetinaNet', backbone=dict( _delete_=True, type='PyramidVisionTransformer', num_layers=[2, 2, 2, 2], init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' 'releases/download/v2/pvt_tiny.pth')), neck=dict(in_channels=[64, 128, 320, 512])) # optimizer optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='RetinaNet', backbone=dict( _delete_=True, type='PyramidVisionTransformerV2', embed_dims=32, num_layers=[2, 2, 2, 2], init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' 'releases/download/v2/pvt_v2_b0.pth')), neck=dict(in_channels=[32, 64, 160, 256])) # optimizer optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py ================================================ _base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' model = dict( backbone=dict( embed_dims=64, init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' 'releases/download/v2/pvt_v2_b1.pth')), neck=dict(in_channels=[64, 128, 320, 512])) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py ================================================ _base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' model = dict( backbone=dict( embed_dims=64, num_layers=[3, 4, 6, 3], init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' 'releases/download/v2/pvt_v2_b2.pth')), neck=dict(in_channels=[64, 128, 320, 512])) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py ================================================ _base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' model = dict( backbone=dict( embed_dims=64, num_layers=[3, 4, 18, 3], init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' 'releases/download/v2/pvt_v2_b3.pth')), neck=dict(in_channels=[64, 128, 320, 512])) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py ================================================ _base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' model = dict( backbone=dict( embed_dims=64, num_layers=[3, 8, 27, 3], init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' 'releases/download/v2/pvt_v2_b4.pth')), neck=dict(in_channels=[64, 128, 320, 512])) # optimizer optimizer = dict( _delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001) # dataset settings data = dict(samples_per_gpu=1, 
workers_per_gpu=1) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (1 sample per GPU) auto_scale_lr = dict(base_batch_size=8) ================================================ FILE: DLTA_AI_app/mmdetection/configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py ================================================ _base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' model = dict( backbone=dict( embed_dims=64, num_layers=[3, 6, 40, 3], mlp_ratios=(4, 4, 4, 4), init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' 'releases/download/v2/pvt_v2_b5.pth')), neck=dict(in_channels=[64, 128, 320, 512])) # optimizer optimizer = dict( _delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001) # dataset settings data = dict(samples_per_gpu=1, workers_per_gpu=1) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (1 sample per GPU) auto_scale_lr = dict(base_batch_size=8) ================================================ FILE: DLTA_AI_app/mmdetection/configs/queryinst/metafile.yml ================================================ Collections: - Name: QueryInst Metadata: Training Data: COCO Training Techniques: - AdamW - Weight Decay Training Resources: 8x V100 GPUs Architecture: - FPN - ResNet - QueryInst Paper: URL: https://openaccess.thecvf.com/content/ICCV2021/papers/Fang_Instances_As_Queries_ICCV_2021_paper.pdf Title: 'Instances as Queries' README: configs/queryinst/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/queryinst.py Version: v2.18.0 Models: - Name: queryinst_r50_fpn_1x_coco In Collection: QueryInst Config: configs/queryinst/queryinst_r50_fpn_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.0 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 37.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth - Name: queryinst_r50_fpn_mstrain_480-800_3x_coco In Collection: QueryInst Config: configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py Metadata: Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 44.8 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 39.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643-7837af86.pth - Name: queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco In Collection: QueryInst Config: configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py Metadata: Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 47.5 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 41.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802-85cffbd8.pth - Name: queryinst_r101_fpn_mstrain_480-800_3x_coco In Collection: QueryInst Config: configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py Metadata: Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.4 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 41.0 Weights:
https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048-91f9995b.pth - Name: queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco In Collection: QueryInst Config: configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py Metadata: Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 49.0 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 42.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621-76cce59f.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py ================================================ _base_ = './queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py ================================================ _base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/queryinst/queryinst_r50_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] num_stages = 6 num_proposals = 100 model = dict( type='QueryInst', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=0, add_extra_convs='on_input', num_outs=4), rpn_head=dict( type='EmbeddingRPNHead', num_proposals=num_proposals, proposal_feature_channel=256), roi_head=dict( type='SparseRoIHead', num_stages=num_stages, stage_loss_weights=[1] * num_stages, proposal_feature_channel=256, bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), out_channels=256, featmap_strides=[4, 8, 16, 32]), mask_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=[ dict( type='DIIHead', num_classes=80, num_ffn_fcs=2, num_heads=8, num_cls_fcs=1, num_reg_fcs=3, feedforward_channels=2048, in_channels=256, dropout=0.0, ffn_act_cfg=dict(type='ReLU', inplace=True), dynamic_conv_cfg=dict( type='DynamicConv', in_channels=256, feat_channels=64, out_channels=256, input_feat_shape=7, act_cfg=dict(type='ReLU', inplace=True), norm_cfg=dict(type='LN')), loss_bbox=dict(type='L1Loss', loss_weight=5.0), loss_iou=dict(type='GIoULoss', loss_weight=2.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0), bbox_coder=dict( type='DeltaXYWHBBoxCoder', clip_border=False, target_means=[0., 0., 0., 0.], target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages) ], 
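# As with bbox_head above, mask_head below is built by a list comprehension
# that instantiates one identical DynamicMaskHead per cascade stage
# (num_stages = 6); stage_loss_weights = [1] * num_stages then weights the
# per-stage losses equally.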
mask_head=[ dict( type='DynamicMaskHead', dynamic_conv_cfg=dict( type='DynamicConv', in_channels=256, feat_channels=64, out_channels=256, input_feat_shape=14, with_proj=False, act_cfg=dict(type='ReLU', inplace=True), norm_cfg=dict(type='LN')), num_convs=4, num_classes=80, roi_feat_size=14, in_channels=256, conv_kernel_size=3, conv_out_channels=256, class_agnostic=False, norm_cfg=dict(type='BN'), upsample_cfg=dict(type='deconv', scale_factor=2), loss_mask=dict( type='DiceLoss', loss_weight=8.0, use_sigmoid=True, activate=False, eps=1e-5)) for _ in range(num_stages) ]), # training and testing settings train_cfg=dict( rpn=None, rcnn=[ dict( assigner=dict( type='HungarianAssigner', cls_cost=dict(type='FocalLossCost', weight=2.0), reg_cost=dict(type='BBoxL1Cost', weight=5.0), iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0)), sampler=dict(type='PseudoSampler'), pos_weight=1, mask_size=28, ) for _ in range(num_stages) ]), test_cfg=dict( rpn=None, rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5))) # optimizer optimizer = dict( _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001, paramwise_cfg=dict( custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)})) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=0.1, norm_type=2)) # learning policy lr_config = dict(policy='step', step=[8, 11], warmup_iters=1000) runner = dict(type='EpochBasedRunner', max_epochs=12) ================================================ FILE: DLTA_AI_app/mmdetection/configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py ================================================ _base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py' num_proposals = 300 model = dict( rpn_head=dict(num_proposals=num_proposals), test_cfg=dict( _delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) # augmentation strategy originates from DETR. 
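# A minimal sketch of what the AutoAugment wrapper below does, assuming
# mmdet's standard behaviour of sampling one sub-policy uniformly at random
# per image (the transform names here are placeholders, not real classes):
#
#   sub_policy = random.choice([
#       [multiscale_resize],                              # branch 1
#       [coarse_resize, random_crop, multiscale_resize],  # branch 2
#   ])
#   for t in sub_policy:
#       results = t(results)
#
# Branch 1 is a plain multiscale Resize over eleven short-side values
# (480-800); branch 2 first resizes to a coarse scale (400/500/600), takes an
# absolute_range RandomCrop of 384-600 px, then resizes again with
# override=True so its outputs land on the same scales as branch 1.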
train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='RandomFlip', flip_ratio=0.5), dict( type='AutoAugment', policies=[[ dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', keep_ratio=True) ], [ dict( type='Resize', img_scale=[(400, 1333), (500, 1333), (600, 1333)], multiscale_mode='value', keep_ratio=True), dict( type='RandomCrop', crop_type='absolute_range', crop_size=(384, 600), allow_negative_crop=True), dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', override=True, keep_ratio=True) ]]), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) ] data = dict(train=dict(pipeline=train_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py ================================================ _base_ = './queryinst_r50_fpn_1x_coco.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) min_values = (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, value) for value in min_values], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) ] data = dict(train=dict(pipeline=train_pipeline)) lr_config = dict(policy='step', step=[27, 33]) runner = dict(type='EpochBasedRunner', max_epochs=36) ================================================ FILE: DLTA_AI_app/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py ================================================ _base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_1.6gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), neck=dict( type='FPN', in_channels=[72, 168, 408, 912], out_channels=256, num_outs=5)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py ================================================ _base_ = [ '../common/mstrain_3x_coco_instance.py', '../_base_/models/cascade_mask_rcnn_r50_fpn.py' ] model = dict( backbone=dict( _delete_=True, type='RegNet', arch='regnetx_3.2gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), neck=dict( type='FPN', in_channels=[96, 192, 432, 1008], out_channels=256, num_outs=5)) img_norm_cfg = dict( # The mean and std are used in PyCls when training RegNets mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False) 
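# (Note, assuming mmcv's default BGR image loading: with to_rgb=False the
# channel order is kept as loaded, so the stats above are the usual ImageNet
# RGB values [123.675, 116.28, 103.53] / [58.395, 57.12, 57.375] with their
# channels reversed to match BGR.)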
train_pipeline = [ # Images are converted to float32 directly after loading in PyCls dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(dataset=dict(pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) optimizer = dict(weight_decay=0.00005) ================================================ FILE: DLTA_AI_app/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py ================================================ _base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_400mf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')), neck=dict( type='FPN', in_channels=[32, 64, 160, 384], out_channels=256, num_outs=5)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py ================================================ _base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_4.0gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), neck=dict( type='FPN', in_channels=[80, 240, 560, 1360], out_channels=256, num_outs=5)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py ================================================ _base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_800mf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')), neck=dict( type='FPN', in_channels=[64, 128, 288, 672], out_channels=256, num_outs=5)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py ================================================ _base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_1.6gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), neck=dict( type='FPN', in_channels=[72, 168, 408, 912], out_channels=256, num_outs=5)) ================================================ FILE: 
DLTA_AI_app/mmdetection/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( _delete_=True, type='RegNet', arch='regnetx_3.2gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), neck=dict( type='FPN', in_channels=[96, 192, 432, 1008], out_channels=256, num_outs=5)) img_norm_cfg = dict( # The mean and std are used in PyCls when training RegNets mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) ================================================ FILE: DLTA_AI_app/mmdetection/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py ================================================ _base_ = './faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py' lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24) ================================================ FILE: DLTA_AI_app/mmdetection/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py ================================================ _base_ = [ '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' ] model = dict( backbone=dict( _delete_=True, type='RegNet', arch='regnetx_3.2gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), neck=dict( type='FPN', in_channels=[96, 192, 432, 1008], out_channels=256, num_outs=5)) img_norm_cfg = dict( # The mean and std are used in PyCls when training RegNets mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), 
dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(dataset=dict(pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) optimizer = dict(weight_decay=0.00005) ================================================ FILE: DLTA_AI_app/mmdetection/configs/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py ================================================ _base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_400mf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')), neck=dict( type='FPN', in_channels=[32, 64, 160, 384], out_channels=256, num_outs=5)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py ================================================ _base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_4.0gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), neck=dict( type='FPN', in_channels=[80, 240, 560, 1360], out_channels=256, num_outs=5)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py ================================================ _base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_800mf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')), neck=dict( type='FPN', in_channels=[64, 128, 288, 672], out_channels=256, num_outs=5)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py ================================================ _base_ = [ '../common/mstrain-poly_3x_coco_instance.py', '../_base_/models/mask_rcnn_r50_fpn.py' ] model = dict( backbone=dict( _delete_=True, type='RegNet', arch='regnetx_1.6gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), neck=dict( type='FPN', in_channels=[72, 168, 408, 912], out_channels=256, num_outs=5)) optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py ================================================ _base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_12gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_12gf')), neck=dict( type='FPN', in_channels=[224, 448, 896, 2240], out_channels=256, num_outs=5)) ================================================ FILE: 
================================================
FILE: DLTA_AI_app/mmdetection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_3.2gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 432, 1008],
        out_channels=256,
        num_outs=5))
img_norm_cfg = dict(
    # The mean and std are used in PyCls when training RegNets
    mean=[103.53, 116.28, 123.675],
    std=[57.375, 57.12, 58.395],
    to_rgb=False)
train_pipeline = [
    # Images are converted to float32 directly after loading in PyCls
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
================================================
FILE: DLTA_AI_app/mmdetection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py
================================================
_base_ = 'mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')))
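The mdconv config above carries no _delete_ flag, so its backbone dict merges into the inherited RegNet backbone instead of replacing it: only dcn, stage_with_dcn and init_cfg change. A rough pure-Python illustration of that merge rule (a sketch, not mmcv's actual implementation):

def merge_cfg(base, child):
    # Fold child keys into base recursively; a nested dict replaces the
    # inherited value outright only when it carries _delete_=True.
    out = dict(base)
    for key, val in child.items():
        if isinstance(val, dict):
            val = dict(val)
            delete = val.pop('_delete_', False)
            if isinstance(out.get(key), dict) and not delete:
                out[key] = merge_cfg(out[key], val)
            else:
                out[key] = val
        else:
            out[key] = val
    return out

base = dict(backbone=dict(type='RegNet', arch='regnetx_3.2gf', frozen_stages=1))
child = dict(backbone=dict(stage_with_dcn=(False, True, True, True)))
print(merge_cfg(base, child)['backbone'])  # keeps type/arch, adds the DCN key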
================================================
FILE: DLTA_AI_app/mmdetection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
================================================
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_3.2gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 432, 1008],
        out_channels=256,
        num_outs=5))
img_norm_cfg = dict(
    # The mean and std are used in PyCls when training RegNets
    mean=[103.53, 116.28, 123.675],
    std=[57.375, 57.12, 58.395],
    to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
================================================
FILE: DLTA_AI_app/mmdetection/configs/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py
================================================
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_400mf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')),
    neck=dict(
        type='FPN',
        in_channels=[32, 64, 160, 384],
        out_channels=256,
        num_outs=5))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
================================================
FILE: DLTA_AI_app/mmdetection/configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py
================================================
_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_4.0gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')),
    neck=dict(
        type='FPN',
        in_channels=[80, 240, 560, 1360],
        out_channels=256,
        num_outs=5))
================================================
FILE: DLTA_AI_app/mmdetection/configs/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py
================================================
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_4.0gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')),
    neck=dict(
        type='FPN',
        in_channels=[80, 240, 560, 1360],
        out_channels=256,
        num_outs=5))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
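Two multi-scale flavours appear in these files: multiscale_mode='value' with six fixed scales (the mask mstrain configs) and multiscale_mode='range' with a (min, max) pair (the faster mstrain configs). A sketch of the sampling semantics only, not mmdet's actual Resize implementation:

import random

def pick_train_scale(img_scale, multiscale_mode):
    if multiscale_mode == 'value':
        # choose one of the explicitly listed (long, short) scales
        return random.choice(img_scale)
    if multiscale_mode == 'range':
        # sample the short edge uniformly between the two listed values
        (long_edge, lo), (_, hi) = img_scale
        return (long_edge, random.randint(min(lo, hi), max(lo, hi)))
    raise ValueError(multiscale_mode)

pick_train_scale([(1333, 640), (1333, 800)], 'range')   # e.g. (1333, 713)
pick_train_scale([(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                  (1333, 768), (1333, 800)], 'value')   # one of the six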
================================================
FILE: DLTA_AI_app/mmdetection/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py
================================================
_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_6.4gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_6.4gf')),
    neck=dict(
        type='FPN',
        in_channels=[168, 392, 784, 1624],
        out_channels=256,
        num_outs=5))
================================================
FILE: DLTA_AI_app/mmdetection/configs/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py
================================================
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_800mf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')),
    neck=dict(
        type='FPN',
        in_channels=[64, 128, 288, 672],
        out_channels=256,
        num_outs=5))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
================================================
FILE: DLTA_AI_app/mmdetection/configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py
================================================
_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_8.0gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_8.0gf')),
    neck=dict(
        type='FPN',
        in_channels=[80, 240, 720, 1920],
        out_channels=256,
        num_outs=5))
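The metafile that follows is ordinary YAML, so the regnet model zoo can be inspected programmatically. A sketch using PyYAML, with the path assumed relative to the mmdetection root:

import yaml

with open('configs/regnet/metafile.yml') as f:
    meta = yaml.safe_load(f)
# Print each model with the COCO box AP recorded under Results.
for model in meta['Models']:
    metrics = {r['Task']: r['Metrics'] for r in model['Results']}
    print(model['Name'], metrics.get('Object Detection'))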
================================================
FILE: DLTA_AI_app/mmdetection/configs/regnet/metafile.yml
================================================
Models:
  - Name: mask_rcnn_regnetx-3.2GF_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py
    Metadata: {Training Memory (GB): 5.0, Epochs: 12, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 40.3}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 36.6}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: mask_rcnn_regnetx-4GF_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py
    Metadata: {Training Memory (GB): 5.5, Epochs: 12, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 41.5}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 37.4}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco/mask_rcnn_regnetx-4GF_fpn_1x_coco_20200517_180217-32e9c92d.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: mask_rcnn_regnetx-6.4GF_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py
    Metadata: {Training Memory (GB): 6.1, Epochs: 12, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 41.0}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 37.1}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco/mask_rcnn_regnetx-6.4GF_fpn_1x_coco_20200517_180439-3a7aae83.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: mask_rcnn_regnetx-8GF_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py
    Metadata: {Training Memory (GB): 6.4, Epochs: 12, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 41.7}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 37.5}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco/mask_rcnn_regnetx-8GF_fpn_1x_coco_20200517_180515-09daa87e.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: mask_rcnn_regnetx-12GF_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py
    Metadata: {Training Memory (GB): 7.4, Epochs: 12, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 42.2}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 38}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco/mask_rcnn_regnetx-12GF_fpn_1x_coco_20200517_180552-b538bd8b.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py
    Metadata: {Training Memory (GB): 5.0, Epochs: 12, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 40.3}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 36.6}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco_20200520_172726-75f40794.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: faster_rcnn_regnetx-3.2GF_fpn_1x_coco
    In Collection: Faster R-CNN
    Config: configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py
    Metadata: {Training Memory (GB): 4.5, Epochs: 12, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 39.9}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco/faster_rcnn_regnetx-3.2GF_fpn_1x_coco_20200517_175927-126fd9bf.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: faster_rcnn_regnetx-3.2GF_fpn_2x_coco
    In Collection: Faster R-CNN
    Config: configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py
    Metadata: {Training Memory (GB): 4.5, Epochs: 24, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 41.1}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco/faster_rcnn_regnetx-3.2GF_fpn_2x_coco_20200520_223955-e2081918.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: retinanet_regnetx-800MF_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py
    Metadata: {Training Memory (GB): 2.5, Epochs: 12, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 35.6}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-800MF_fpn_1x_coco/retinanet_regnetx-800MF_fpn_1x_coco_20200517_191403-f6f91d10.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: retinanet_regnetx-1.6GF_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py
    Metadata: {Training Memory (GB): 3.3, Epochs: 12, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 37.3}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco/retinanet_regnetx-1.6GF_fpn_1x_coco_20200517_191403-37009a9d.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: retinanet_regnetx-3.2GF_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py
    Metadata: {Training Memory (GB): 4.2, Epochs: 12, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 39.1}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco/retinanet_regnetx-3.2GF_fpn_1x_coco_20200520_163141-cb1509e8.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco
    In Collection: Faster R-CNN
    Config: configs/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py
    Metadata: {Training Memory (GB): 2.3, Epochs: 36, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 37.1}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210526_095112-e1967c37.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco
    In Collection: Faster R-CNN
    Config: configs/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py
    Metadata: {Training Memory (GB): 2.8, Epochs: 36, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 38.8}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210526_095118-a2c70b20.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco
    In Collection: Faster R-CNN
    Config: configs/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py
    Metadata: {Training Memory (GB): 3.4, Epochs: 36, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 40.5}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-1_20210526_095325-94aa46cc.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco
    In Collection: Faster R-CNN
    Config: configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
    Metadata: {Training Memory (GB): 4.4, Epochs: 36, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 42.3}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-3_20210526_095152-e16a5227.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco
    In Collection: Faster R-CNN
    Config: configs/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py
    Metadata: {Training Memory (GB): 4.9, Epochs: 36, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 42.8}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210526_095201-65eaf841.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
    Metadata: {Training Memory (GB): 5.0, Epochs: 36, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 43.1}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 38.7}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221-99879813.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py
    Metadata: {Training Memory (GB): 2.5, Epochs: 36, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 37.6}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 34.4}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco_20210601_235443-8aac57a4.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py
    Metadata: {Training Memory (GB): 2.9, Epochs: 36, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 39.5}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 36.1}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco_20210602_210641-715d51f5.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py
    Metadata: {Training Memory (GB): 3.6, Epochs: 36, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 40.9}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 37.5}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-1_20210602_210641-6764cff5.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
    Metadata: {Training Memory (GB): 5.0, Epochs: 36, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 43.1}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 38.7}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221-99879813.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py
    Metadata: {Training Memory (GB): 5.1, Epochs: 36, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 43.4}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 39.2}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco_20210602_032621-00f0331c.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco
    In Collection: Cascade R-CNN
    Config: configs/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py
    Metadata: {Training Memory (GB): 4.3, Epochs: 36, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 41.6}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 36.4}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210715_211619-5142f449.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco
    In Collection: Cascade R-CNN
    Config: configs/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py
    Metadata: {Training Memory (GB): 4.8, Epochs: 36, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 42.8}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 37.6}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210715_211616-dcbd13f4.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco
    In Collection: Cascade R-CNN
    Config: configs/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py
    Metadata: {Training Memory (GB): 5.4, Epochs: 36, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 44.5}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 39.0}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-1_20210715_211616-75f29a61.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco
    In Collection: Cascade R-CNN
    Config: configs/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
    Metadata: {Training Memory (GB): 6.4, Epochs: 36, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 45.8}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 40.0}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-3_20210715_211616-b9c2c58b.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
  - Name: cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco
    In Collection: Cascade R-CNN
    Config: configs/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py
    Metadata: {Training Memory (GB): 6.9, Epochs: 36, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [RegNet]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 45.8}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 40.0}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210715_212034-cbb1be4c.pth
    Paper: {URL: https://arxiv.org/abs/2003.13678, Title: 'Designing Network Design Spaces'}
    README: configs/regnet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11, Version: v2.1.0}
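Each Weights entry in the metafile above is a direct checkpoint URL, so a single checkpoint can be fetched with stock PyTorch. A sketch, with the URL copied from the faster_rcnn_regnetx-3.2GF entry:

from torch.hub import load_state_dict_from_url

url = ('https://download.openmmlab.com/mmdetection/v2.0/regnet/'
       'faster_rcnn_regnetx-3.2GF_fpn_1x_coco/'
       'faster_rcnn_regnetx-3.2GF_fpn_1x_coco_20200517_175927-126fd9bf.pth')
ckpt = load_state_dict_from_url(url, map_location='cpu')
# mmdetection checkpoints typically hold 'meta' and 'state_dict' entries.
print(sorted(ckpt.keys()))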
================================================
FILE: DLTA_AI_app/mmdetection/configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py
================================================
_base_ = './retinanet_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_1.6gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')),
    neck=dict(
        type='FPN',
        in_channels=[72, 168, 408, 912],
        out_channels=256,
        num_outs=5))
================================================
FILE: DLTA_AI_app/mmdetection/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_3.2gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 432, 1008],
        out_channels=256,
        num_outs=5))
img_norm_cfg = dict(
    # The mean and std are used in PyCls when training RegNets
    mean=[103.53, 116.28, 123.675],
    std=[57.375, 57.12, 58.395],
    to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
================================================
FILE: DLTA_AI_app/mmdetection/configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py
================================================
_base_ = './retinanet_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_800mf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')),
    neck=dict(
        type='FPN',
        in_channels=[64, 128, 288, 672],
        out_channels=256,
        num_outs=5))
================================================
FILE: DLTA_AI_app/mmdetection/configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py
================================================
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
model = dict(bbox_head=dict(transform_method='minmax', use_grid_points=True))
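The RegNet RetinaNet config above replaces the base optimizer_config with one that clips gradients (max_norm=35, norm_type=2). The underlying PyTorch operation is just the following (sketch of the equivalent call, not mmcv's hook):

import torch

def clip_like_config(model):
    # Rescale the global L2 norm of all gradients to at most 35 before the
    # optimizer step, matching grad_clip=dict(max_norm=35, norm_type=2).
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=35, norm_type=2)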
================================================
FILE: DLTA_AI_app/mmdetection/configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py
================================================
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
model = dict(
    bbox_head=dict(transform_method='minmax', use_grid_points=True),
    # training and testing settings
    train_cfg=dict(
        init=dict(
            assigner=dict(
                _delete_=True,
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0,
                ignore_iof_thr=-1))))
================================================
FILE: DLTA_AI_app/mmdetection/configs/reppoints/metafile.yml
================================================
Collections:
  - Name: RepPoints
    Metadata:
      Training Data: COCO
      Training Techniques: [SGD with Momentum, Weight Decay]
      Training Resources: 8x V100 GPUs
      Architecture: [Group Normalization, FPN, RepPoints, ResNet]
    Paper: {URL: https://arxiv.org/abs/1904.11490, Title: 'RepPoints: Point Set Representation for Object Detection'}
    README: configs/reppoints/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/reppoints_detector.py#L9, Version: v2.0.0}

Models:
  - Name: bbox_r50_grid_fpn_gn-neck+head_1x_coco
    In Collection: RepPoints
    Config: configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py
    Metadata:
      Training Memory (GB): 3.9
      inference time (ms/im):
        - value: 62.89
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 36.4}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916-0eedf8d1.pth
  - Name: bbox_r50_grid_center_fpn_gn-neck+head_1x_coco
    In Collection: RepPoints
    Config: configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py
    Metadata:
      Training Memory (GB): 3.9
      inference time (ms/im):
        - value: 64.94
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 37.4}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916-0eedf8d1.pth
  - Name: reppoints_moment_r50_fpn_1x_coco
    In Collection: RepPoints
    Config: configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 3.3
      inference time (ms/im):
        - value: 54.05
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 37.0}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth
  - Name: reppoints_moment_r50_fpn_gn-neck+head_1x_coco
    In Collection: RepPoints
    Config: configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py
    Metadata:
      Training Memory (GB): 3.9
      inference time (ms/im):
        - value: 57.14
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 38.1}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco_20200329_145952-3e51b550.pth
  - Name: reppoints_moment_r50_fpn_gn-neck+head_2x_coco
    In Collection: RepPoints
    Config: configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py
    Metadata:
      Training Memory (GB): 3.9
      inference time (ms/im):
        - value: 57.14
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 38.6}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco_20200329-91babaa2.pth
  - Name: reppoints_moment_r101_fpn_gn-neck+head_2x_coco
    In Collection: RepPoints
    Config: configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py
    Metadata:
      Training Memory (GB): 5.8
      inference time (ms/im):
        - value: 72.99
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 40.5}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco_20200329-4fbc7310.pth
  - Name: reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco
    In Collection: RepPoints
    Config: configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py
    Metadata:
      Training Memory (GB): 5.9
      inference time (ms/im):
        - value: 82.64
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 42.9}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-3309fbf2.pth
  - Name: reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco
    In Collection: RepPoints
    Config: configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py
    Metadata:
      Training Memory (GB): 7.1
      inference time (ms/im):
        - value: 107.53
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 44.2}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-f87da1ea.pth
================================================
FILE: DLTA_AI_app/mmdetection/configs/reppoints/reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py
================================================
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
model = dict(bbox_head=dict(transform_method='minmax'))
================================================
FILE: DLTA_AI_app/mmdetection/configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py
================================================
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py
================================================
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
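The init_cfg checkpoint strings above are URI-style prefixes that mmcv resolves itself: 'torchvision://resnet101' pulls the torchvision weights, while 'open-mmlab://...' maps to the OpenMMLab model zoo. A sketch of loading such a checkpoint directly, assuming mmcv 1.x:

from mmcv.runner import load_checkpoint

def load_pretrained(model):
    # mmcv dispatches on the 'torchvision://' prefix and downloads the
    # matching torchvision state dict before loading it into the model.
    load_checkpoint(model, 'torchvision://resnet101', map_location='cpu')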
================================================
FILE: DLTA_AI_app/mmdetection/configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='RepPointsDetector',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5),
    bbox_head=dict(
        type='RepPointsHead',
        num_classes=80,
        in_channels=256,
        feat_channels=256,
        point_feat_channels=256,
        stacked_convs=3,
        num_points=9,
        gradient_mul=0.1,
        point_strides=[8, 16, 32, 64, 128],
        point_base_scale=4,
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox_init=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.5),
        loss_bbox_refine=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
        transform_method='moment'),
    # training and testing settings
    train_cfg=dict(
        init=dict(
            assigner=dict(type='PointAssigner', scale=4, pos_num=1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        refine=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0,
                ignore_iof_thr=-1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.5),
        max_per_img=100))
optimizer = dict(lr=0.01)
================================================
FILE: DLTA_AI_app/mmdetection/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py
================================================
_base_ = './reppoints_moment_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(neck=dict(norm_cfg=norm_cfg), bbox_head=dict(norm_cfg=norm_cfg))
optimizer = dict(lr=0.01)
================================================
FILE: DLTA_AI_app/mmdetection/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py
================================================
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
================================================
FILE: DLTA_AI_app/mmdetection/configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py
================================================
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/reppoints/reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py
================================================
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
model = dict(bbox_head=dict(transform_method='partial_minmax'))
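The gn-neck+head variants above only swap norm_cfg, so every BatchNorm in the FPN neck and the RepPoints head becomes GroupNorm with 32 groups. In plain PyTorch terms, each 256-channel conv is followed by:

import torch.nn as nn

# norm_cfg=dict(type='GN', num_groups=32) over 256-channel features
# corresponds to this stock layer; unlike BN it has no batch statistics,
# which is why it is popular for small per-GPU batch sizes.
gn = nn.GroupNorm(num_groups=32, num_channels=256)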
================================================
FILE: DLTA_AI_app/mmdetection/configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py
================================================
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,
        base_width=26,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py
================================================
_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,
        base_width=26,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py
================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,
        base_width=26,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/res2net/htc_r2_101_fpn_20e_coco.py
================================================
_base_ = '../htc/htc_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,
        base_width=26,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
================================================
FILE: DLTA_AI_app/mmdetection/configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,
        base_width=26,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
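The htc_r2_101 config above shortens the schedule to 20 epochs with lr steps at [16, 19]. Under mmcv's step policy that is a piecewise-constant decay; a sketch of the resulting multiplier, assuming the default decay factor of 0.1 and a 0.02 base lr:

def lr_at_epoch(base_lr, epoch, steps=(16, 19), gamma=0.1):
    # lr is multiplied by gamma once per milestone already passed
    passed = sum(1 for s in steps if epoch >= s)
    return base_lr * gamma ** passed

print([lr_at_epoch(0.02, e) for e in (0, 16, 19)])  # [0.02, 0.002, 0.0002]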
================================================
FILE: DLTA_AI_app/mmdetection/configs/res2net/metafile.yml
================================================
Models:
  - Name: faster_rcnn_r2_101_fpn_2x_coco
    In Collection: Faster R-CNN
    Config: configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py
    Metadata: {Training Memory (GB): 7.4, Epochs: 24, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [Res2Net]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 43.0}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth
    Paper: {URL: https://arxiv.org/abs/1904.01169, Title: 'Res2Net for object detection and instance segmentation'}
    README: configs/res2net/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239, Version: v2.1.0}
  - Name: mask_rcnn_r2_101_fpn_2x_coco
    In Collection: Mask R-CNN
    Config: configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py
    Metadata: {Training Memory (GB): 7.9, Epochs: 24, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [Res2Net]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 43.6}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 38.7}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/mask_rcnn_r2_101_fpn_2x_coco/mask_rcnn_r2_101_fpn_2x_coco-17f061e8.pth
    Paper: {URL: https://arxiv.org/abs/1904.01169, Title: 'Res2Net for object detection and instance segmentation'}
    README: configs/res2net/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239, Version: v2.1.0}
  - Name: cascade_rcnn_r2_101_fpn_20e_coco
    In Collection: Cascade R-CNN
    Config: configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py
    Metadata: {Training Memory (GB): 7.8, Epochs: 20, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [Res2Net]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 45.7}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_rcnn_r2_101_fpn_20e_coco/cascade_rcnn_r2_101_fpn_20e_coco-f4b7b7db.pth
    Paper: {URL: https://arxiv.org/abs/1904.01169, Title: 'Res2Net for object detection and instance segmentation'}
    README: configs/res2net/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239, Version: v2.1.0}
  - Name: cascade_mask_rcnn_r2_101_fpn_20e_coco
    In Collection: Cascade R-CNN
    Config: configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py
    Metadata: {Training Memory (GB): 9.5, Epochs: 20, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [Res2Net]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 46.4}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 40.0}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco/cascade_mask_rcnn_r2_101_fpn_20e_coco-8a7b41e1.pth
    Paper: {URL: https://arxiv.org/abs/1904.01169, Title: 'Res2Net for object detection and instance segmentation'}
    README: configs/res2net/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239, Version: v2.1.0}
  - Name: htc_r2_101_fpn_20e_coco
    In Collection: HTC
    Config: configs/res2net/htc_r2_101_fpn_20e_coco.py
    Metadata: {Epochs: 20, Training Data: COCO, Training Techniques: [SGD with Momentum, Weight Decay], Training Resources: 8x V100 GPUs, Architecture: [Res2Net]}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 47.5}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 41.6}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/htc_r2_101_fpn_20e_coco/htc_r2_101_fpn_20e_coco-3a8d2112.pth
    Paper: {URL: https://arxiv.org/abs/1904.01169, Title: 'Res2Net for object detection and instance segmentation'}
    README: configs/res2net/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239, Version: v2.1.0}
================================================
FILE: DLTA_AI_app/mmdetection/configs/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py
================================================
_base_ = './cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py'
model = dict(
    backbone=dict(
        stem_channels=128,
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='open-mmlab://resnest101')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py
================================================
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    backbone=dict(
        type='ResNeSt',
        stem_channels=64,
        depth=50,
        radix=2,
        reduction_factor=4,
        avg_down_stride=True,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
    roi_head=dict(
        bbox_head=[
            dict(
                type='Shared4Conv1FCBBoxHead',
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                norm_cfg=norm_cfg,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared4Conv1FCBBoxHead',
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                norm_cfg=norm_cfg,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared4Conv1FCBBoxHead',
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                norm_cfg=norm_cfg,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0))
        ],
        mask_head=dict(norm_cfg=norm_cfg)))
# # use ResNeSt img_norm
img_norm_cfg = dict(
    mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
================================================
FILE: DLTA_AI_app/mmdetection/configs/resnest/cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
================================================
_base_ = './cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py'
model = dict(
    backbone=dict(
        stem_channels=128,
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='open-mmlab://resnest101')))
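The ResNeSt config above trains the backbone with type='SyncBN' and norm_eval=False, i.e. batch statistics are synchronized across GPUs and kept live during training, which only works under a distributed launch. A sketch of what that norm_cfg amounts to in stock PyTorch, assuming an initialized process group:

import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
# Replaces every BatchNorm layer with SyncBatchNorm, mirroring the effect
# of norm_cfg=dict(type='SyncBN', requires_grad=True) in the config.
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)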
================================================
FILE: DLTA_AI_app/mmdetection/configs/resnest/cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
================================================
_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    backbone=dict(
        type='ResNeSt',
        stem_channels=64,
        depth=50,
        radix=2,
        reduction_factor=4,
        avg_down_stride=True,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
    roi_head=dict(
        bbox_head=[
            dict(
                type='Shared4Conv1FCBBoxHead',
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                norm_cfg=norm_cfg,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared4Conv1FCBBoxHead',
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                norm_cfg=norm_cfg,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared4Conv1FCBBoxHead',
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                norm_cfg=norm_cfg,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0))
        ], ))
# # use ResNeSt img_norm
img_norm_cfg = dict(
    mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=False,
        poly2mask=False),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
================================================
FILE: DLTA_AI_app/mmdetection/configs/resnest/faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
================================================
_base_ = './faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py'
model = dict(
    backbone=dict(
        stem_channels=128,
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='open-mmlab://resnest101')))
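Across the three cascade stages above, target_stds shrink from [0.1, 0.1, 0.2, 0.2] to [0.033, 0.033, 0.067, 0.067]: the coder divides each regression delta by its std before the loss, so later, more precise stages see proportionally larger targets for the same geometric offset. A sketch of the x-center encoding used by DeltaXYWHBBoxCoder (the other coordinates are analogous):

def encode_dx(gt_cx, proposal_cx, proposal_w, mean=0.0, std=0.1):
    # raw delta, then normalization by the per-stage (mean, std)
    dx = (gt_cx - proposal_cx) / proposal_w
    return (dx - mean) / std

encode_dx(105.0, 100.0, 50.0, std=0.1)    # 1.0 at the first stage
encode_dx(105.0, 100.0, 50.0, std=0.033)  # ~3.0 at the third stage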
================================================
FILE: DLTA_AI_app/mmdetection/configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    backbone=dict(
        type='ResNeSt',
        stem_channels=64, depth=50, radix=2, reduction_factor=4,
        avg_down_stride=True, num_stages=4, out_indices=(0, 1, 2, 3),
        frozen_stages=1, norm_cfg=norm_cfg, norm_eval=False, style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            norm_cfg=norm_cfg)))
# use ResNeSt img_norm
img_norm_cfg = dict(
    mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=False, poly2mask=False),
    dict(type='Resize', img_scale=[(1333, 640), (1333, 800)],
         multiscale_mode='range', keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))

================================================
FILE: DLTA_AI_app/mmdetection/configs/resnest/mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py
================================================
_base_ = './mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py'
model = dict(
    backbone=dict(
        stem_channels=128,
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest101')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    backbone=dict(
        type='ResNeSt',
        stem_channels=64, depth=50, radix=2, reduction_factor=4,
        avg_down_stride=True, num_stages=4, out_indices=(0, 1, 2, 3),
        frozen_stages=1, norm_cfg=norm_cfg, norm_eval=False, style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            norm_cfg=norm_cfg),
        mask_head=dict(norm_cfg=norm_cfg)))
# use ResNeSt img_norm
img_norm_cfg = dict(
    mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True, poly2mask=False),
    dict(type='Resize',
         img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                    (1333, 768), (1333, 800)],
         multiscale_mode='value', keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
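The two files above use the two Resize modes differently: the faster_rcnn variant samples a short side anywhere in 640-800 ('range'), while the mask_rcnn variant picks one of six fixed scales ('value'). A hedged, self-contained illustration of the difference:

# --- editor's sketch (not part of the repository): 'range' vs 'value' multiscale modes ---
import random

def sample_scale(img_scale, multiscale_mode):
    if multiscale_mode == 'range':  # e.g. [(1333, 640), (1333, 800)]
        (long1, short1), (long2, short2) = img_scale
        return (max(long1, long2),
                random.randint(min(short1, short2), max(short1, short2)))
    return random.choice(img_scale)  # 'value': one of a fixed list of scales

print(sample_scale([(1333, 640), (1333, 800)], 'range'))
print(sample_scale([(1333, 640), (1333, 672), (1333, 704)], 'value'))
# --- end sketch ---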
================================================
FILE: DLTA_AI_app/mmdetection/configs/resnest/metafile.yml
================================================
Models:
  - Name: faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco
    In Collection: Faster R-CNN
    Config: configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
    Metadata:
      Training Memory (GB): 4.8
      Epochs: 12
      Training Data: COCO
      Training Techniques: [SGD with Momentum, Weight Decay]
      Training Resources: 8x V100 GPUs
      Architecture: [ResNeSt]
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 42.0}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20200926_125502-20289c16.pth
    Paper: {URL: https://arxiv.org/abs/2004.08955, Title: 'ResNeSt: Split-Attention Networks'}
    README: configs/resnest/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273, Version: v2.7.0}
  - Name: faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco
    In Collection: Faster R-CNN
    Config: configs/resnest/faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
    Metadata:
      Training Memory (GB): 7.1
      Epochs: 12
      Training Data: COCO
      Training Techniques: [SGD with Momentum, Weight Decay]
      Training Resources: 8x V100 GPUs
      Architecture: [ResNeSt]
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 44.5}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201006_021058-421517f1.pth
    Paper: {URL: https://arxiv.org/abs/2004.08955, Title: 'ResNeSt: Split-Attention Networks'}
    README: configs/resnest/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273, Version: v2.7.0}
  - Name: mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco
    In Collection: Mask R-CNN
    Config: configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py
    Metadata:
      Training Memory (GB): 5.5
      Epochs: 12
      Training Data: COCO
      Training Techniques: [SGD with Momentum, Weight Decay]
      Training Resources: 8x V100 GPUs
      Architecture: [ResNeSt]
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 42.6}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 38.1}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20200926_125503-8a2c3d47.pth
    Paper: {URL: https://arxiv.org/abs/2004.08955, Title: 'ResNeSt: Split-Attention Networks'}
    README: configs/resnest/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273, Version: v2.7.0}
  - Name: mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco
    In Collection: Mask R-CNN
    Config: configs/resnest/mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py
    Metadata:
      Training Memory (GB): 7.8
      Epochs: 12
      Training Data: COCO
      Training Techniques: [SGD with Momentum, Weight Decay]
      Training Resources: 8x V100 GPUs
      Architecture: [ResNeSt]
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 45.2}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 40.2}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201005_215831-af60cdf9.pth
    Paper: {URL: https://arxiv.org/abs/2004.08955, Title: 'ResNeSt: Split-Attention Networks'}
    README: configs/resnest/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273, Version: v2.7.0}
  - Name: cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco
    In Collection: Cascade R-CNN
    Config: configs/resnest/cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
    Metadata:
      Epochs: 12
      Training Data: COCO
      Training Techniques: [SGD with Momentum, Weight Decay]
      Training Resources: 8x V100 GPUs
      Architecture: [ResNeSt]
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 44.5}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201122_213640-763cc7b5.pth
    Paper: {URL: https://arxiv.org/abs/2004.08955, Title: 'ResNeSt: Split-Attention Networks'}
    README: configs/resnest/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273, Version: v2.7.0}
  - Name: cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco
    In Collection: Cascade R-CNN
    Config: configs/resnest/cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
    Metadata:
      Training Memory (GB): 8.4
      Epochs: 12
      Training Data: COCO
      Training Techniques: [SGD with Momentum, Weight Decay]
      Training Resources: 8x V100 GPUs
      Architecture: [ResNeSt]
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 46.8}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201005_113242-b9459f8f.pth
    Paper: {URL: https://arxiv.org/abs/2004.08955, Title: 'ResNeSt: Split-Attention Networks'}
    README: configs/resnest/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273, Version: v2.7.0}
  - Name: cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco
    In Collection: Cascade R-CNN
    Config: configs/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py
    Metadata:
      Epochs: 12
      Training Data: COCO
      Training Techniques: [SGD with Momentum, Weight Decay]
      Training Resources: 8x V100 GPUs
      Architecture: [ResNeSt]
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 45.4}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 39.5}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201122_104428-99eca4c7.pth
    Paper: {URL: https://arxiv.org/abs/2004.08955, Title: 'ResNeSt: Split-Attention Networks'}
    README: configs/resnest/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273, Version: v2.7.0}
  - Name: cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco
    In Collection: Cascade R-CNN
    Config: configs/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py
    Metadata:
      Training Memory (GB): 10.5
      Epochs: 12
      Training Data: COCO
      Training Techniques: [SGD with Momentum, Weight Decay]
      Training Resources: 8x V100 GPUs
      Architecture: [ResNeSt]
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 47.7}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 41.4}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201005_113243-42607475.pth
    Paper: {URL: https://arxiv.org/abs/2004.08955, Title: 'ResNeSt: Split-Attention Networks'}
    README: configs/resnest/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273, Version: v2.7.0}
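Metafiles like the one above are machine-readable. A minimal sketch, assuming PyYAML is installed and the file path shown (an assumption), that lists each model with its box AP:

# --- editor's sketch (not part of the repository): reading a metafile programmatically ---
import yaml

with open('configs/resnest/metafile.yml') as f:  # path is an assumption
    meta = yaml.safe_load(f)
for m in meta['Models']:
    box_ap = next((r['Metrics'].get('box AP') for r in m['Results']
                   if r['Task'] == 'Object Detection'), None)
    print(m['Name'], box_ap)
# --- end sketch ---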
================================================
FILE: DLTA_AI_app/mmdetection/configs/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py
================================================
_base_ = [
    '../_base_/models/cascade_mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth'  # noqa
model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.0002,
    weight_decay=0.05,
    paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))

================================================
FILE: DLTA_AI_app/mmdetection/configs/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco.py
================================================
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth'  # noqa
model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.0002,
    weight_decay=0.05,
    paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))

================================================
FILE: DLTA_AI_app/mmdetection/configs/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py
================================================
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth'  # noqa
model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.0002,
    weight_decay=0.05,
    paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
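The optimizer blocks above replace the inherited SGD wholesale via `_delete_=True`. A hedged stand-in for mmcv's merge rule, using plain dicts to show why the flag matters:

# --- editor's sketch (not part of the repository): what `_delete_=True` does ---
def merge_value(inherited, override):
    # without `_delete_`, override keys are merged into the inherited dict
    # (leaving e.g. `momentum` behind); with it, the inherited dict is dropped
    if override.pop('_delete_', False):
        return override
    return {**inherited, **override}

sgd = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
adamw = dict(_delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05)
print(merge_value(sgd, dict(adamw)))  # -> AdamW settings only, no stray momentum
# --- end sketch ---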
================================================
FILE: DLTA_AI_app/mmdetection/configs/resnet_strikes_back/metafile.yml
================================================
Models:
  - Name: faster_rcnn_r50_fpn_rsb-pretrain_1x_coco
    In Collection: Faster R-CNN
    Config: configs/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco.py
    Metadata:
      Training Memory (GB): 3.9
      Epochs: 12
      Training Data: COCO
      Training Techniques: [SGD with Momentum, Weight Decay]
      Training Resources: 8x V100 GPUs
      Architecture: [ResNet]
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 40.8}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_162229-32ae82a9.pth
    Paper: {URL: https://arxiv.org/abs/2110.00476, Title: 'ResNet strikes back: An improved training procedure in timm'}
    README: configs/resnet_strikes_back/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/configs/resnet_strikes_back/README.md, Version: v2.22.0}
  - Name: cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco
    In Collection: Cascade R-CNN
    Config: configs/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py
    Metadata:
      Training Memory (GB): 6.2
      Epochs: 12
      Training Data: COCO
      Training Techniques: [SGD with Momentum, Weight Decay]
      Training Resources: 8x V100 GPUs
      Architecture: [ResNet]
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 44.8}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 39.9}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_193636-8b9ad50f.pth
    Paper: {URL: https://arxiv.org/abs/2110.00476, Title: 'ResNet strikes back: An improved training procedure in timm'}
    README: configs/resnet_strikes_back/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/configs/resnet_strikes_back/README.md, Version: v2.22.0}
  - Name: retinanet_r50_fpn_rsb-pretrain_1x_coco
    In Collection: RetinaNet
    Config: configs/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco.py
    Metadata:
      Training Memory (GB): 3.8
      Epochs: 12
      Training Data: COCO
      Training Techniques: [SGD with Momentum, Weight Decay]
      Training Resources: 8x V100 GPUs
      Architecture: [ResNet]
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 39.0}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco/retinanet_r50_fpn_rsb-pretrain_1x_coco_20220113_175432-bd24aae9.pth
    Paper: {URL: https://arxiv.org/abs/2110.00476, Title: 'ResNet strikes back: An improved training procedure in timm'}
    README: configs/resnet_strikes_back/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/configs/resnet_strikes_back/README.md, Version: v2.22.0}
  - Name: mask_rcnn_r50_fpn_rsb-pretrain_1x_coco
    In Collection: Mask R-CNN
    Config: configs/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py
    Metadata:
      Training Memory (GB): 4.5
      Epochs: 12
      Training Data: COCO
      Training Techniques: [SGD with Momentum, Weight Decay]
      Training Resources: 8x V100 GPUs
      Architecture: [ResNet]
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 41.2}}
      - {Task: Instance Segmentation, Dataset: COCO, Metrics: {mask AP: 38.2}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054-06ce8ba0.pth
    Paper: {URL: https://arxiv.org/abs/2110.00476, Title: 'ResNet strikes back: An improved training procedure in timm'}
    README: configs/resnet_strikes_back/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/configs/resnet_strikes_back/README.md, Version: v2.22.0}

================================================
FILE: DLTA_AI_app/mmdetection/configs/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco.py
================================================
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth'  # noqa
model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.0001,
    weight_decay=0.05,
    paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/metafile.yml
================================================
Collections:
  - Name: RetinaNet
    Metadata:
      Training Data: COCO
      Training Techniques: [SGD with Momentum, Weight Decay]
      Training Resources: 8x V100 GPUs
      Architecture: [Focal Loss, FPN, ResNet]
    Paper: {URL: https://arxiv.org/abs/1708.02002, Title: "Focal Loss for Dense Object Detection"}
    README: configs/retinanet/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/retinanet.py#L6, Version: v2.0.0}
Models:
  - Name: retinanet_r18_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/retinanet/retinanet_r18_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 1.7
      Training Resources: 8x V100 GPUs
      Epochs: 12
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 31.7}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055-614fd399.pth
  - Name: retinanet_r18_fpn_1x8_1x_coco
    In Collection: RetinaNet
    Config: configs/retinanet/retinanet_r18_fpn_1x8_1x_coco.py
    Metadata:
      Training Memory (GB): 5.0
      Training Resources: 1x V100 GPUs
      Epochs: 12
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 31.7}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x8_1x_coco/retinanet_r18_fpn_1x8_1x_coco_20220407_171255-4ea310d7.pth
  - Name: retinanet_r50_caffe_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 3.5
      inference time (ms/im):
        - {value: 53.76, hardware: V100, backend: PyTorch, batch size: 1, mode: FP32, resolution: '(800, 1333)'}
      Epochs: 12
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 36.3}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531-f11027c5.pth
  - Name: retinanet_r50_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/retinanet/retinanet_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 3.8
      inference time (ms/im):
        - {value: 52.63, hardware: V100, backend: PyTorch, batch size: 1, mode: FP32, resolution: '(800, 1333)'}
      Epochs: 12
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 36.5}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth
  - Name: retinanet_r50_fpn_fp16_1x_coco
    In Collection: RetinaNet
    Config: configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py
    Metadata:
      Training Memory (GB): 2.8
      Training Techniques: [SGD with Momentum, Weight Decay, Mixed Precision Training]
      inference time (ms/im):
        - {value: 31.65, hardware: V100, backend: PyTorch, batch size: 1, mode: FP16, resolution: '(800, 1333)'}
      Epochs: 12
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 36.4}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702-0dbfb212.pth
  - Name: retinanet_r50_fpn_2x_coco
    In Collection: RetinaNet
    Config: configs/retinanet/retinanet_r50_fpn_2x_coco.py
    Metadata: {Epochs: 24}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 37.4}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131-fdb43119.pth
  - Name: retinanet_r50_fpn_mstrain_640-800_3x_coco
    In Collection: RetinaNet
    Config: configs/retinanet/retinanet_r50_fpn_mstrain_640-800_3x_coco.py
    Metadata: {Epochs: 36}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 39.5}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_mstrain_3x_coco/retinanet_r50_fpn_mstrain_3x_coco_20210718_220633-88476508.pth
  - Name: retinanet_r101_caffe_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/retinanet/retinanet_r101_caffe_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 5.5
      inference time (ms/im):
        - {value: 68.03, hardware: V100, backend: PyTorch, batch size: 1, mode: FP32, resolution: '(800, 1333)'}
      Epochs: 12
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 38.5}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531-b428fa0f.pth
  - Name: retinanet_r101_caffe_fpn_mstrain_3x_coco
    In Collection: RetinaNet
    Config: configs/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco.py
    Metadata: {Epochs: 36}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 40.7}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco/retinanet_r101_caffe_fpn_mstrain_3x_coco_20210721_063439-88a8a944.pth
  - Name: retinanet_r101_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/retinanet/retinanet_r101_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 5.7
      inference time (ms/im):
        - {value: 66.67, hardware: V100, backend: PyTorch, batch size: 1, mode: FP32, resolution: '(800, 1333)'}
      Epochs: 12
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 38.5}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130-7a93545f.pth
  - Name: retinanet_r101_fpn_2x_coco
    In Collection: RetinaNet
    Config: configs/retinanet/retinanet_r101_fpn_2x_coco.py
    Metadata:
      Training Memory (GB): 5.7
      inference time (ms/im):
        - {value: 66.67, hardware: V100, backend: PyTorch, batch size: 1, mode: FP32, resolution: '(800, 1333)'}
      Epochs: 24
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 38.9}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131-5560aee8.pth
  - Name: retinanet_r101_fpn_mstrain_640-800_3x_coco
    In Collection: RetinaNet
    Config: configs/retinanet/retinanet_r101_fpn_mstrain_640-800_3x_coco.py
    Metadata: {Epochs: 36}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 41}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_mstrain_3x_coco/retinanet_r101_fpn_mstrain_3x_coco_20210720_214650-7ee888e0.pth
  - Name: retinanet_x101_32x4d_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/retinanet/retinanet_x101_32x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 7.0
      inference time (ms/im):
        - {value: 82.64, hardware: V100, backend: PyTorch, batch size: 1, mode: FP32, resolution: '(800, 1333)'}
      Epochs: 12
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 39.9}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_1x_coco/retinanet_x101_32x4d_fpn_1x_coco_20200130-5c8b7ec4.pth
  - Name: retinanet_x101_32x4d_fpn_2x_coco
    In Collection: RetinaNet
    Config: configs/retinanet/retinanet_x101_32x4d_fpn_2x_coco.py
    Metadata:
      Training Memory (GB): 7.0
      inference time (ms/im):
        - {value: 82.64, hardware: V100, backend: PyTorch, batch size: 1, mode: FP32, resolution: '(800, 1333)'}
      Epochs: 24
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 40.1}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_2x_coco/retinanet_x101_32x4d_fpn_2x_coco_20200131-237fc5e1.pth
  - Name: retinanet_x101_64x4d_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/retinanet/retinanet_x101_64x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 10.0
      inference time (ms/im):
        - {value: 114.94, hardware: V100, backend: PyTorch, batch size: 1, mode: FP32, resolution: '(800, 1333)'}
      Epochs: 12
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 41.0}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_1x_coco/retinanet_x101_64x4d_fpn_1x_coco_20200130-366f5af1.pth
  - Name: retinanet_x101_64x4d_fpn_2x_coco
    In Collection: RetinaNet
    Config: configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py
    Metadata:
      Training Memory (GB): 10.0
      inference time (ms/im):
        - {value: 114.94, hardware: V100, backend: PyTorch, batch size: 1, mode: FP32, resolution: '(800, 1333)'}
      Epochs: 24
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 40.8}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_2x_coco/retinanet_x101_64x4d_fpn_2x_coco_20200131-bca068ab.pth
  - Name: retinanet_x101_64x4d_fpn_mstrain_640-800_3x_coco
    In Collection: RetinaNet
    Config: configs/retinanet/retinanet_x101_64x4d_fpn_mstrain_640-800_3x_coco.py
    Metadata: {Epochs: 36}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 41.6}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_mstrain_3x_coco/retinanet_x101_64x4d_fpn_mstrain_3x_coco_20210719_051838-022c2187.pth

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_r101_caffe_fpn_1x_coco.py
================================================
_base_ = './retinanet_r50_caffe_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco.py
================================================
_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py'
# learning policy
model = dict(
    pretrained='open-mmlab://detectron2/resnet101_caffe',
    backbone=dict(depth=101))
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_r101_fpn_1x_coco.py
================================================
_base_ = './retinanet_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_r101_fpn_2x_coco.py
================================================
_base_ = './retinanet_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_r101_fpn_mstrain_640-800_3x_coco.py
================================================
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py'
]
# optimizer
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
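Note that the mstrain_3x configs above still use the legacy `pretrained=` shorthand while their siblings use `init_cfg`. The two spellings below are equivalent in mmdetection 2.x (the former is deprecated); shown only for comparison with the configs above:

# --- editor's sketch (not part of the repository): legacy `pretrained` vs `init_cfg` ---
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
# ...is the older way of writing:
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
# --- end sketch ---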
================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_r18_fpn_1x8_1x_coco.py
================================================
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# data
data = dict(samples_per_gpu=8)
# optimizer
model = dict(
    backbone=dict(
        depth=18,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
    neck=dict(in_channels=[64, 128, 256, 512]))
# Note: If the learning rate is set to 0.0025, the mAP will be 32.4.
optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (1 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_r18_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# optimizer
model = dict(
    backbone=dict(
        depth=18,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
    neck=dict(in_channels=[64, 128, 256, 512]))
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py
================================================
_base_ = './retinanet_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
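The `auto_scale_lr` blocks in the r18 configs above encode the linear LR scaling rule. A minimal sketch of the arithmetic (the function name is illustrative, not an mmdetection API):

# --- editor's sketch (not part of the repository): the linear scaling behind auto_scale_lr ---
def scaled_lr(base_lr, base_batch_size, num_gpus, samples_per_gpu):
    # configured lr is scaled by (actual batch size / base_batch_size)
    return base_lr * (num_gpus * samples_per_gpu) / base_batch_size

print(scaled_lr(0.005, 8, 1, 8))   # the 1x8 recipe as shipped -> 0.005
print(scaled_lr(0.005, 8, 4, 8))   # same recipe on 4 GPUs -> 0.02
# --- end sketch ---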
================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_1x_coco.py
================================================
_base_ = './retinanet_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize',
         img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                    (1333, 768), (1333, 800)],
         multiscale_mode='value', keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_2x_coco.py
================================================
_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_3x_coco.py
================================================
_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_r50_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_r50_fpn_2x_coco.py
================================================
_base_ = './retinanet_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_r50_fpn_90k_coco.py
================================================
_base_ = 'retinanet_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[60000, 80000])
# Runner type
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000)
checkpoint_config = dict(interval=10000)
evaluation = dict(interval=10000, metric='bbox')
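The 90k-iteration schedule above is the iteration-based twin of the epoch-based 1x schedule. A back-of-the-envelope check, assuming COCO train2017 (~117k images) and the default 8x2 batch of 16:

# --- editor's sketch (not part of the repository): relating the 90k-iter schedule to epochs ---
images, batch = 117_266, 16
iters_per_epoch = images / batch            # ~7329
print(90_000 / iters_per_epoch)             # ~12.3, i.e. roughly the 12-epoch 1x schedule
# --- end sketch ---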
================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py
================================================
_base_ = './retinanet_r50_fpn_1x_coco.py'
# fp16 settings
fp16 = dict(loss_scale=512.)
# set grad_norm for stability during mixed-precision training
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_r50_fpn_mstrain_640-800_3x_coco.py
================================================
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py'
]
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_x101_32x4d_fpn_1x_coco.py
================================================
_base_ = './retinanet_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101, groups=32, base_width=4, num_stages=4,
        out_indices=(0, 1, 2, 3), frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True), style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_x101_32x4d_fpn_2x_coco.py
================================================
_base_ = './retinanet_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101, groups=32, base_width=4, num_stages=4,
        out_indices=(0, 1, 2, 3), frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True), style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_x101_64x4d_fpn_1x_coco.py
================================================
_base_ = './retinanet_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101, groups=64, base_width=4, num_stages=4,
        out_indices=(0, 1, 2, 3), frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True), style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py
================================================
_base_ = './retinanet_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101, groups=64, base_width=4, num_stages=4,
        out_indices=(0, 1, 2, 3), frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True), style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/retinanet/retinanet_x101_64x4d_fpn_mstrain_640-800_3x_coco.py
================================================
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py'
]
# optimizer
model = dict(
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(type='ResNeXt', depth=101, groups=64, base_width=4))
optimizer = dict(type='SGD', lr=0.01)

================================================
FILE: DLTA_AI_app/mmdetection/configs/rpn/rpn_r101_caffe_fpn_1x_coco.py
================================================
_base_ = './rpn_r50_caffe_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/rpn/rpn_r101_fpn_1x_coco.py
================================================
_base_ = './rpn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/rpn/rpn_r101_fpn_2x_coco.py
================================================
_base_ = './rpn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/rpn/rpn_r50_caffe_c4_1x_coco.py
================================================
_base_ = [
    '../_base_/models/rpn_r50_caffe_c4.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# dataset settings
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_label=False),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
evaluation = dict(interval=1, metric='proposal_fast')

================================================
FILE: DLTA_AI_app/mmdetection/configs/rpn/rpn_r50_caffe_fpn_1x_coco.py
================================================
_base_ = './rpn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_label=False),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
================================================
FILE: DLTA_AI_app/mmdetection/configs/rpn/rpn_r50_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/models/rpn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_label=False),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes']),
]
data = dict(train=dict(pipeline=train_pipeline))
evaluation = dict(interval=1, metric='proposal_fast')

================================================
FILE: DLTA_AI_app/mmdetection/configs/rpn/rpn_r50_fpn_2x_coco.py
================================================
_base_ = './rpn_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)

================================================
FILE: DLTA_AI_app/mmdetection/configs/rpn/rpn_x101_32x4d_fpn_1x_coco.py
================================================
_base_ = './rpn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101, groups=32, base_width=4, num_stages=4,
        out_indices=(0, 1, 2, 3), frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True), style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/rpn/rpn_x101_32x4d_fpn_2x_coco.py
================================================
_base_ = './rpn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101, groups=32, base_width=4, num_stages=4,
        out_indices=(0, 1, 2, 3), frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True), style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/rpn/rpn_x101_64x4d_fpn_1x_coco.py
================================================
_base_ = './rpn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101, groups=64, base_width=4, num_stages=4,
        out_indices=(0, 1, 2, 3), frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True), style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))

================================================
FILE: DLTA_AI_app/mmdetection/configs/rpn/rpn_x101_64x4d_fpn_2x_coco.py
================================================
_base_ = './rpn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101, groups=64, base_width=4, num_stages=4,
        out_indices=(0, 1, 2, 3), frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True), style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
================================================
FILE: DLTA_AI_app/mmdetection/configs/sabl/metafile.yml
================================================
Collections:
  - Name: SABL
    Metadata:
      Training Data: COCO
      Training Techniques: [SGD with Momentum, Weight Decay]
      Training Resources: 8x V100 GPUs
      Architecture: [FPN, ResNet, SABL]
    Paper: {URL: https://arxiv.org/abs/1912.04260, Title: 'Side-Aware Boundary Localization for More Precise Object Detection'}
    README: configs/sabl/README.md
    Code: {URL: https://github.com/open-mmlab/mmdetection/blob/v2.4.0/mmdet/models/roi_heads/bbox_heads/sabl_head.py#L14, Version: v2.4.0}
Models:
  - Name: sabl_faster_rcnn_r50_fpn_1x_coco
    In Collection: SABL
    Config: configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py
    Metadata: {Epochs: 12}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 39.9}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r50_fpn_1x_coco/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth
  - Name: sabl_faster_rcnn_r101_fpn_1x_coco
    In Collection: SABL
    Config: configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py
    Metadata: {Epochs: 12}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 41.7}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r101_fpn_1x_coco/sabl_faster_rcnn_r101_fpn_1x_coco-f804c6c1.pth
  - Name: sabl_cascade_rcnn_r50_fpn_1x_coco
    In Collection: SABL
    Config: configs/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco.py
    Metadata: {Epochs: 12}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 41.6}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco/sabl_cascade_rcnn_r50_fpn_1x_coco-e1748e5e.pth
  - Name: sabl_cascade_rcnn_r101_fpn_1x_coco
    In Collection: SABL
    Config: configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py
    Metadata: {Epochs: 12}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 43.0}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco/sabl_cascade_rcnn_r101_fpn_1x_coco-2b83e87c.pth
  - Name: sabl_retinanet_r50_fpn_1x_coco
    In Collection: SABL
    Config: configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py
    Metadata: {Epochs: 12}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 37.7}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_1x_coco/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth
  - Name: sabl_retinanet_r50_fpn_gn_1x_coco
    In Collection: SABL
    Config: configs/sabl/sabl_retinanet_r50_fpn_gn_1x_coco.py
    Metadata: {Epochs: 12}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 38.8}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_gn_1x_coco/sabl_retinanet_r50_fpn_gn_1x_coco-e16dfcf1.pth
  - Name: sabl_retinanet_r101_fpn_1x_coco
    In Collection: SABL
    Config: configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py
    Metadata: {Epochs: 12}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 39.7}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_1x_coco/sabl_retinanet_r101_fpn_1x_coco-42026904.pth
  - Name: sabl_retinanet_r101_fpn_gn_1x_coco
    In Collection: SABL
    Config: configs/sabl/sabl_retinanet_r101_fpn_gn_1x_coco.py
    Metadata: {Epochs: 12}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 40.5}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_1x_coco/sabl_retinanet_r101_fpn_gn_1x_coco-40a893e8.pth
  - Name: sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco
    In Collection: SABL
    Config: configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py
    Metadata: {Epochs: 24}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 42.9}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco-1e63382c.pth
  - Name: sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco
    In Collection: SABL
    Config: configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py
    Metadata: {Epochs: 24}
    Results:
      - {Task: Object Detection, Dataset: COCO, Metrics: {box AP: 43.6}}
    Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco-5342f857.pth
================================================
FILE: DLTA_AI_app/mmdetection/configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/models/cascade_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')),
    roi_head=dict(bbox_head=[
        dict(
            type='SABLHead',
            num_classes=80,
            cls_in_channels=256, reg_in_channels=256, roi_feat_size=7,
            reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3,
            reg_pre_num=2, reg_post_num=1,
            cls_out_channels=1024, reg_offset_out_channels=256,
            reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0,
            reg_class_agnostic=True, norm_cfg=None,
            bbox_coder=dict(
                type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7),
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0)),
        dict(
            type='SABLHead',
            num_classes=80,
            cls_in_channels=256, reg_in_channels=256, roi_feat_size=7,
            reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3,
            reg_pre_num=2, reg_post_num=1,
            cls_out_channels=1024, reg_offset_out_channels=256,
            reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0,
            reg_class_agnostic=True, norm_cfg=None,
            bbox_coder=dict(
                type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.5),
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0)),
        dict(
            type='SABLHead',
            num_classes=80,
            cls_in_channels=256, reg_in_channels=256, roi_feat_size=7,
            reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3,
            reg_pre_num=2, reg_post_num=1,
            cls_out_channels=1024, reg_offset_out_channels=256,
            reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0,
            reg_class_agnostic=True, norm_cfg=None,
            bbox_coder=dict(
                type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.3),
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0))
    ]))
================================================
FILE: DLTA_AI_app/mmdetection/configs/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/models/cascade_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
    roi_head=dict(bbox_head=[
        dict(
            type='SABLHead',
            num_classes=80,
            cls_in_channels=256, reg_in_channels=256, roi_feat_size=7,
            reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3,
            reg_pre_num=2, reg_post_num=1,
            cls_out_channels=1024, reg_offset_out_channels=256,
            reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0,
            reg_class_agnostic=True, norm_cfg=None,
            bbox_coder=dict(
                type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7),
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0)),
        dict(
            type='SABLHead',
            num_classes=80,
            cls_in_channels=256, reg_in_channels=256, roi_feat_size=7,
            reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3,
            reg_pre_num=2, reg_post_num=1,
            cls_out_channels=1024, reg_offset_out_channels=256,
            reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0,
            reg_class_agnostic=True, norm_cfg=None,
            bbox_coder=dict(
                type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.5),
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0)),
        dict(
            type='SABLHead',
            num_classes=80,
            cls_in_channels=256, reg_in_channels=256, roi_feat_size=7,
            reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3,
            reg_pre_num=2, reg_post_num=1,
            cls_out_channels=1024, reg_offset_out_channels=256,
            reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0,
            reg_class_agnostic=True, norm_cfg=None,
            bbox_coder=dict(
                type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.3),
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0))
    ]))

================================================
FILE: DLTA_AI_app/mmdetection/configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')),
    roi_head=dict(
        bbox_head=dict(
            _delete_=True,
            type='SABLHead',
            num_classes=80,
            cls_in_channels=256, reg_in_channels=256, roi_feat_size=7,
            reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3,
            reg_pre_num=2, reg_post_num=1,
            cls_out_channels=1024, reg_offset_out_channels=256,
            reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0,
            reg_class_agnostic=True, norm_cfg=None,
            bbox_coder=dict(
                type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7),
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0))))

================================================
FILE: DLTA_AI_app/mmdetection/configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    roi_head=dict(
        bbox_head=dict(
            _delete_=True,
            type='SABLHead',
            num_classes=80,
            cls_in_channels=256, reg_in_channels=256, roi_feat_size=7,
            reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3,
            reg_pre_num=2, reg_post_num=1,
            cls_out_channels=1024, reg_offset_out_channels=256,
            reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0,
            reg_class_agnostic=True, norm_cfg=None,
            bbox_coder=dict(
                type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7),
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0))))
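The SABLHead configs above pair a `loss_bbox_cls` with a `loss_bbox_reg` because BucketingBBoxCoder localizes each box side in two stages. A hedged, purely conceptual illustration of that idea (names and normalization here are illustrative, not mmdetection internals):

# --- editor's sketch (not part of the repository): the bucketing idea in SABL ---
def bucket_of(x, lo, hi, buckets):
    # coarse step: classify which bucket a boundary falls in;
    # fine step: regress its offset inside that bucket
    width = (hi - lo) / buckets
    idx = min(int((x - lo) / width), buckets - 1)
    offset = (x - (lo + idx * width)) / width
    return idx, offset

print(bucket_of(0.63, 0.0, 1.0, 7))  # -> (4, ~0.41) for a normalized boundary
# --- end sketch ---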
================================================
FILE: DLTA_AI_app/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py
================================================
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')),
    bbox_head=dict(
        _delete_=True,
        type='SABLRetinaHead',
        num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256,
        approx_anchor_generator=dict(
            type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]),
        square_anchor_generator=dict(
            type='AnchorGenerator', ratios=[1.0], scales=[4],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
        loss_cls=dict(
            type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25,
            loss_weight=1.0),
        loss_bbox_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
        loss_bbox_reg=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='ApproxMaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4,
            min_pos_iou=0.0, ignore_iof_thr=-1),
        allowed_border=-1, pos_weight=-1, debug=False))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)

================================================
FILE: DLTA_AI_app/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_gn_1x_coco.py
================================================
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')),
    bbox_head=dict(
        _delete_=True,
        type='SABLRetinaHead',
        num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256,
        approx_anchor_generator=dict(
            type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]),
        square_anchor_generator=dict(
            type='AnchorGenerator', ratios=[1.0], scales=[4],
            strides=[8, 16, 32, 64, 128]),
        norm_cfg=norm_cfg,
        bbox_coder=dict(
            type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
        loss_cls=dict(
            type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25,
            loss_weight=1.0),
        loss_bbox_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
        loss_bbox_reg=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='ApproxMaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4,
            min_pos_iou=0.0, ignore_iof_thr=-1),
        allowed_border=-1, pos_weight=-1, debug=False))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), norm_cfg=norm_cfg, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), loss_bbox_reg=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), # training and testing settings train_cfg=dict( assigner=dict( type='ApproxMaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.0, ignore_iof_thr=-1), allowed_border=-1, pos_weight=-1, debug=False)) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 480), (1333, 960)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] data = dict(train=dict(pipeline=train_pipeline)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) ================================================ FILE: DLTA_AI_app/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py ================================================ _base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # model settings norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')), bbox_head=dict( _delete_=True, type='SABLRetinaHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), norm_cfg=norm_cfg, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), loss_bbox_reg=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), # training and testing settings train_cfg=dict( assigner=dict( type='ApproxMaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.0, ignore_iof_thr=-1), allowed_border=-1, pos_weight=-1, debug=False)) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] data = dict(train=dict(pipeline=train_pipeline)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) ================================================ FILE: 
DLTA_AI_app/mmdetection/configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings model = dict( bbox_head=dict( _delete_=True, type='SABLRetinaHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), loss_bbox_reg=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), # training and testing settings train_cfg=dict( assigner=dict( type='ApproxMaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.0, ignore_iof_thr=-1), allowed_border=-1, pos_weight=-1, debug=False)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) ================================================ FILE: DLTA_AI_app/mmdetection/configs/sabl/sabl_retinanet_r50_fpn_gn_1x_coco.py ================================================ _base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( bbox_head=dict( _delete_=True, type='SABLRetinaHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), norm_cfg=norm_cfg, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), loss_bbox_reg=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), # training and testing settings train_cfg=dict( assigner=dict( type='ApproxMaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.0, ignore_iof_thr=-1), allowed_border=-1, pos_weight=-1, debug=False)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) ================================================ FILE: DLTA_AI_app/mmdetection/configs/scnet/metafile.yml ================================================ Collections: - Name: SCNet Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - FPN - ResNet - SCNet Paper: URL: https://arxiv.org/abs/2012.10150 Title: 'SCNet: Training Inference Sample Consistency for Instance Segmentation' README: configs/scnet/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.9.0/mmdet/models/detectors/scnet.py#L6 Version: v2.9.0 Models: - Name: scnet_r50_fpn_1x_coco In Collection: SCNet Config: configs/scnet/scnet_r50_fpn_1x_coco.py Metadata: Training Memory (GB): 7.0 inference time 
(ms/im): - value: 161.29 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 43.5 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 39.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco-c3f09857.pth - Name: scnet_r50_fpn_20e_coco In Collection: SCNet Config: configs/scnet/scnet_r50_fpn_20e_coco.py Metadata: Training Memory (GB): 7.0 inference time (ms/im): - value: 161.29 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 20 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 44.5 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 40.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_20e_coco/scnet_r50_fpn_20e_coco-a569f645.pth - Name: scnet_r101_fpn_20e_coco In Collection: SCNet Config: configs/scnet/scnet_r101_fpn_20e_coco.py Metadata: Training Memory (GB): 8.9 inference time (ms/im): - value: 172.41 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 20 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 45.8 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 40.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r101_fpn_20e_coco/scnet_r101_fpn_20e_coco-294e312c.pth - Name: scnet_x101_64x4d_fpn_20e_coco In Collection: SCNet Config: configs/scnet/scnet_x101_64x4d_fpn_20e_coco.py Metadata: Training Memory (GB): 13.2 inference time (ms/im): - value: 204.08 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (800, 1333) Epochs: 20 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 47.5 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 42.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_x101_64x4d_fpn_20e_coco/scnet_x101_64x4d_fpn_20e_coco-fb09dec9.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/scnet/scnet_r101_fpn_20e_coco.py ================================================ _base_ = './scnet_r50_fpn_20e_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/scnet/scnet_r50_fpn_1x_coco.py ================================================ _base_ = '../htc/htc_r50_fpn_1x_coco.py' # model settings model = dict( type='SCNet', roi_head=dict( _delete_=True, type='SCNetRoIHead', num_stages=3, stage_loss_weights=[1, 0.5, 0.25], bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=[ dict( type='SCNetBBoxHead', num_shared_fcs=2, in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='SCNetBBoxHead', num_shared_fcs=2, in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.05, 0.05, 0.1, 0.1]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, 
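# (descriptive note, inserted at stage two of SCNet's three cascade bbox
# heads) Each later stage shrinks the DeltaXYWH target_stds, from
# [0.1, 0.1, 0.2, 0.2] to [0.05, 0.05, 0.1, 0.1] to
# [0.033, 0.033, 0.067, 0.067], so box refinement gets progressively
# tighter as proposal quality rises through the cascade.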
loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='SCNetBBoxHead', num_shared_fcs=2, in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.033, 0.033, 0.067, 0.067]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) ], mask_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), mask_head=dict( type='SCNetMaskHead', num_convs=12, in_channels=256, conv_out_channels=256, num_classes=80, conv_to_res=True, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), semantic_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), out_channels=256, featmap_strides=[8]), semantic_head=dict( type='SCNetSemanticHead', num_ins=5, fusion_level=1, num_convs=4, in_channels=256, conv_out_channels=256, num_classes=183, loss_seg=dict( type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2), conv_to_res=True), glbctx_head=dict( type='GlobalContextHead', num_convs=4, in_channels=256, conv_out_channels=256, num_classes=80, loss_weight=3.0, conv_to_res=True), feat_relay_head=dict( type='FeatureRelayHead', in_channels=1024, out_conv_channels=256, roi_feat_size=7, scale_factor=2))) # uncomment below code to enable test time augmentations # img_norm_cfg = dict( # mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) # test_pipeline = [ # dict(type='LoadImageFromFile'), # dict( # type='MultiScaleFlipAug', # img_scale=[(600, 900), (800, 1200), (1000, 1500), (1200, 1800), # (1400, 2100)], # flip=True, # transforms=[ # dict(type='Resize', keep_ratio=True), # dict(type='RandomFlip', flip_ratio=0.5), # dict(type='Normalize', **img_norm_cfg), # dict(type='Pad', size_divisor=32), # dict(type='ImageToTensor', keys=['img']), # dict(type='Collect', keys=['img']), # ]) # ] # data = dict( # val=dict(pipeline=test_pipeline), # test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/scnet/scnet_r50_fpn_20e_coco.py ================================================ _base_ = './scnet_r50_fpn_1x_coco.py' # learning policy lr_config = dict(step=[16, 19]) runner = dict(type='EpochBasedRunner', max_epochs=20) ================================================ FILE: DLTA_AI_app/mmdetection/configs/scnet/scnet_x101_64x4d_fpn_20e_coco.py ================================================ _base_ = './scnet_r50_fpn_20e_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/scnet/scnet_x101_64x4d_fpn_8x1_20e_coco.py ================================================ _base_ = './scnet_x101_64x4d_fpn_20e_coco.py' data = dict(samples_per_gpu=1, workers_per_gpu=1) optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. 
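# An illustrative reading of the linear scaling rule behind this flag
# (assumed arithmetic, not part of the config itself): when training is
# launched with mmdetection's --auto-scale-lr option, the learning rate is
# rescaled by (num_gpus * samples_per_gpu) / base_batch_size, so running
# this 8x1 config on 4 GPUs would give 0.01 * (4 * 1) / 8 = 0.005.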
# base_batch_size = (8 GPUs) x (1 samples per GPU) auto_scale_lr = dict(base_batch_size=8) ================================================ FILE: DLTA_AI_app/mmdetection/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py ================================================ _base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( frozen_stages=-1, zero_init_residual=False, norm_cfg=norm_cfg, init_cfg=None), neck=dict(norm_cfg=norm_cfg), roi_head=dict( bbox_head=dict( type='Shared4Conv1FCBBoxHead', conv_out_channels=256, norm_cfg=norm_cfg))) # optimizer optimizer = dict(paramwise_cfg=dict(norm_decay_mult=0)) optimizer_config = dict(_delete_=True, grad_clip=None) # learning policy lr_config = dict(warmup_ratio=0.1, step=[65, 71]) runner = dict(type='EpochBasedRunner', max_epochs=73) ================================================ FILE: DLTA_AI_app/mmdetection/configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( frozen_stages=-1, zero_init_residual=False, norm_cfg=norm_cfg, init_cfg=None), neck=dict(norm_cfg=norm_cfg), roi_head=dict( bbox_head=dict( type='Shared4Conv1FCBBoxHead', conv_out_channels=256, norm_cfg=norm_cfg), mask_head=dict(norm_cfg=norm_cfg))) # optimizer optimizer = dict(paramwise_cfg=dict(norm_decay_mult=0)) optimizer_config = dict(_delete_=True, grad_clip=None) # learning policy lr_config = dict(warmup_ratio=0.1, step=[65, 71]) runner = dict(type='EpochBasedRunner', max_epochs=73) ================================================ FILE: DLTA_AI_app/mmdetection/configs/scratch/metafile.yml ================================================ Collections: - Name: Rethinking ImageNet Pre-training Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - FPN - RPN - ResNet Paper: URL: https://arxiv.org/abs/1811.08883 Title: 'Rethinking ImageNet Pre-training' README: configs/scratch/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py Version: v2.0.0 Models: - Name: faster_rcnn_r50_fpn_gn-all_scratch_6x_coco In Collection: Rethinking ImageNet Pre-training Config: configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py Metadata: Epochs: 72 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_faster_rcnn_r50_fpn_gn_6x_bbox_mAP-0.407_20200201_193013-90813d01.pth - Name: mask_rcnn_r50_fpn_gn-all_scratch_6x_coco In Collection: Rethinking ImageNet Pre-training Config: configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py Metadata: Epochs: 72 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.2 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 37.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth 
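The two scratch configs above train with GroupNorm everywhere and no ImageNet initialization; optimizer_config uses _delete_=True to drop the inherited gradient clipping, and the runner stretches the 12-epoch 1x schedule to 73 epochs with LR steps at epochs 65 and 71. A minimal sketch of inspecting the merged result, assuming an mmdetection checkout with mmcv installed (the inspection script is illustrative, not part of the repo):

from mmcv import Config

cfg = Config.fromfile(
    'configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py')

# frozen_stages=-1 plus init_cfg=None: no backbone stage is frozen and no
# pretrained weights are loaded, i.e. genuine training from scratch.
print(cfg.model.backbone.frozen_stages)       # -1
print(cfg.model.backbone.init_cfg)            # None
# GN replaces BN in the backbone, neck, and 4conv1fc bbox head alike.
print(cfg.model.roi_head.bbox_head.norm_cfg)  # {'type': 'GN', ...}
print(cfg.runner.max_epochs)                  # 73 (~6x the 12-epoch "1x")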
================================================ FILE: DLTA_AI_app/mmdetection/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py ================================================ _base_ = [ '../_base_/models/cascade_mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')), roi_head=dict( bbox_head=[ dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=1203, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=True, cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), loss_cls=dict( type='SeesawLoss', p=0.8, q=2.0, num_classes=1203, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=1203, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.05, 0.05, 0.1, 0.1]), reg_class_agnostic=True, cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), loss_cls=dict( type='SeesawLoss', p=0.8, q=2.0, num_classes=1203, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=1203, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.033, 0.033, 0.067, 0.067]), reg_class_agnostic=True, cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), loss_cls=dict( type='SeesawLoss', p=0.8, q=2.0, num_classes=1203, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) ], mask_head=dict(num_classes=1203)), test_cfg=dict( rcnn=dict( score_thr=0.0001, # LVIS allows up to 300 max_per_img=300))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] dataset_type = 'LVISV1Dataset' data_root = 'data/lvis_v1/' data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 'annotations/lvis_v1_train.json', img_prefix=data_root, pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/lvis_v1_val.json', img_prefix=data_root, pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/lvis_v1_val.json', img_prefix=data_root, pipeline=test_pipeline)) evaluation = dict(interval=24, metric=['bbox', 'segm']) 
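Every stage of the cascade above targets LVIS v1's 1203 classes and swaps cross-entropy for SeesawLoss, where p and q are the paper's mitigation and compensation exponents; note that the tempearture=20 spelling is not a dump artifact, it matches the parameter name as defined in mmdet's NormedLinear/NormedConv2d predictors. A short sketch of reading those settings back from the merged config, under the same assumptions as the sketch above:

from mmcv import Config

cfg = Config.fromfile(
    'configs/seesaw_loss/'
    'cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py')

# All three cascade stages share the same Seesaw settings.
for i, head in enumerate(cfg.model.roi_head.bbox_head):
    print(i, head.loss_cls.type, head.loss_cls.p, head.loss_cls.q)

# LVIS scoring allows up to 300 detections per image, hence the very low
# score threshold and raised per-image cap at test time.
print(cfg.model.test_cfg.rcnn.score_thr)    # 0.0001
print(cfg.model.test_cfg.rcnn.max_per_img)  # 300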
================================================ FILE: DLTA_AI_app/mmdetection/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py ================================================ _base_ = './cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py' # noqa: E501 model = dict( roi_head=dict( mask_head=dict( predictor_cfg=dict(type='NormedConv2d', tempearture=20)))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py ================================================ _base_ = [ '../_base_/models/cascade_mask_rcnn_r50_fpn.py', '../_base_/datasets/lvis_v1_instance.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')), roi_head=dict( bbox_head=[ dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=1203, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=True, cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), loss_cls=dict( type='SeesawLoss', p=0.8, q=2.0, num_classes=1203, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=1203, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.05, 0.05, 0.1, 0.1]), reg_class_agnostic=True, cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), loss_cls=dict( type='SeesawLoss', p=0.8, q=2.0, num_classes=1203, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=1203, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.033, 0.033, 0.067, 0.067]), reg_class_agnostic=True, cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), loss_cls=dict( type='SeesawLoss', p=0.8, q=2.0, num_classes=1203, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) ], mask_head=dict(num_classes=1203)), test_cfg=dict( rcnn=dict( score_thr=0.0001, # LVIS allows up to 300 max_per_img=300))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] data = dict(train=dict(dataset=dict(pipeline=train_pipeline))) evaluation = dict(interval=24, metric=['bbox', 'segm']) ================================================ FILE: DLTA_AI_app/mmdetection/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py ================================================ _base_ = './cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py' # noqa: E501 model = dict( roi_head=dict( mask_head=dict( 
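# (descriptive note) The only change relative to this file's _base_: the
# mask head's final conv is swapped for a normalized (cosine) predictor,
# matching the NormedLinear classifier used by the bbox heads; 20 is the
# scaling temperature applied to the normalized logits.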
predictor_cfg=dict(type='NormedConv2d', tempearture=20)))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py ================================================ _base_ = './mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py ================================================ _base_ = './mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py' # noqa: E501 model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py ================================================ _base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py ================================================ _base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py' # noqa: E501 model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] model = dict( roi_head=dict( bbox_head=dict( num_classes=1203, cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), loss_cls=dict( type='SeesawLoss', p=0.8, q=2.0, num_classes=1203, loss_weight=1.0)), mask_head=dict(num_classes=1203)), test_cfg=dict( rcnn=dict( score_thr=0.0001, # LVIS allows up to 300 max_per_img=300))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] dataset_type = 'LVISV1Dataset' data_root = 'data/lvis_v1/' data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 
'annotations/lvis_v1_train.json', img_prefix=data_root, pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/lvis_v1_val.json', img_prefix=data_root, pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/lvis_v1_val.json', img_prefix=data_root, pipeline=test_pipeline)) evaluation = dict(interval=24, metric=['bbox', 'segm']) ================================================ FILE: DLTA_AI_app/mmdetection/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py ================================================ _base_ = './mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py' model = dict( roi_head=dict( mask_head=dict( predictor_cfg=dict(type='NormedConv2d', tempearture=20)))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/lvis_v1_instance.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] model = dict( roi_head=dict( bbox_head=dict( num_classes=1203, cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), loss_cls=dict( type='SeesawLoss', p=0.8, q=2.0, num_classes=1203, loss_weight=1.0)), mask_head=dict(num_classes=1203)), test_cfg=dict( rcnn=dict( score_thr=0.0001, # LVIS allows up to 300 max_per_img=300))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] data = dict(train=dict(dataset=dict(pipeline=train_pipeline))) evaluation = dict(interval=12, metric=['bbox', 'segm']) ================================================ FILE: DLTA_AI_app/mmdetection/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py ================================================ _base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py' model = dict( roi_head=dict( mask_head=dict( predictor_cfg=dict(type='NormedConv2d', tempearture=20)))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/seesaw_loss/metafile.yml ================================================ Collections: - Name: Seesaw Loss Metadata: Training Data: LVIS Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - Softmax - RPN - Convolution - Dense Connections - FPN - ResNet - RoIAlign - Seesaw Loss Paper: URL: https://arxiv.org/abs/2008.10032 Title: 'Seesaw Loss for Long-Tailed Instance Segmentation' README: configs/seesaw_loss/README.md Models: - Name: mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1 In Collection: Seesaw Loss Config: seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: LVIS v1 Metrics: box AP: 25.6 - Task: Instance Segmentation Dataset: LVIS v1 Metrics: mask AP: 25.0 Weights: 
https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-a698dd3d.pth - Name: mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 In Collection: Seesaw Loss Config: seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: LVIS v1 Metrics: box AP: 25.6 - Task: Instance Segmentation Dataset: LVIS v1 Metrics: mask AP: 25.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a1c11314.pth - Name: mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1 In Collection: Seesaw Loss Config: seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: LVIS v1 Metrics: box AP: 27.4 - Task: Instance Segmentation Dataset: LVIS v1 Metrics: mask AP: 26.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-8e6e6dd5.pth - Name: mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 In Collection: Seesaw Loss Config: seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: LVIS v1 Metrics: box AP: 27.2 - Task: Instance Segmentation Dataset: LVIS v1 Metrics: mask AP: 27.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a0b59c42.pth - Name: mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1 In Collection: Seesaw Loss Config: configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: LVIS v1 Metrics: box AP: 27.6 - Task: Instance Segmentation Dataset: LVIS v1 Metrics: mask AP: 26.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-392a804b.pth - Name: mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 In Collection: Seesaw Loss Config: configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: LVIS v1 Metrics: box AP: 27.6 - Task: Instance Segmentation Dataset: LVIS v1 Metrics: mask AP: 26.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-cd0f6a12.pth - Name: mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1 In Collection: Seesaw Loss Config: configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: LVIS v1 Metrics: box AP: 28.9 - Task: Instance Segmentation Dataset: LVIS v1 Metrics: mask AP: 27.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-e68eb464.pth - Name: mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 In Collection: Seesaw Loss Config: configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: LVIS v1 Metrics: box AP: 28.9 - Task: Instance Segmentation Dataset: LVIS v1 Metrics: mask AP: 28.2 Weights: 
https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-1d817139.pth - Name: cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1 In Collection: Seesaw Loss Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: LVIS v1 Metrics: box AP: 33.1 - Task: Instance Segmentation Dataset: LVIS v1 Metrics: mask AP: 29.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-71e2215e.pth - Name: cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 In Collection: Seesaw Loss Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: LVIS v1 Metrics: box AP: 33.0 - Task: Instance Segmentation Dataset: LVIS v1 Metrics: mask AP: 30.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-8b5a6745.pth - Name: cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1 In Collection: Seesaw Loss Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: LVIS v1 Metrics: box AP: 30.0 - Task: Instance Segmentation Dataset: LVIS v1 Metrics: mask AP: 29.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-5d8ca2a4.pth - Name: cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 In Collection: Seesaw Loss Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: LVIS v1 Metrics: box AP: 32.8 - Task: Instance Segmentation Dataset: LVIS v1 Metrics: mask AP: 30.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-c8551505.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( frozen_stages=0, norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False, init_cfg=dict( type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( frozen_stages=0, norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False, init_cfg=dict( type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth'))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), 
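# (note on the selfsup_pretrain configs above, inserted between pipeline
# steps) frozen_stages=0 freezes only the stem, unlike the usual
# frozen_stages=1 which also freezes the first residual stage, and SyncBN
# with norm_eval=False re-estimates BN statistics during fine-tuning;
# plausibly because MoCo v2 / SwAV checkpoints lack the supervised
# ImageNet BN statistics that are normally safe to freeze (a reading of
# the settings, not stated in the configs themselves).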
dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) ] data = dict(train=dict(pipeline=train_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( frozen_stages=0, norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False, init_cfg=dict( type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( frozen_stages=0, norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False, init_cfg=dict( type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar'))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) ] data = dict(train=dict(pipeline=train_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', # 270k iterations with batch_size 64 is roughly equivalent to 144 epochs '../common/ssj_270k_coco_instance.py', ] norm_cfg = dict(type='SyncBN', requires_grad=True) # Use MMSyncBN that handles empty tensor in head. It can be changed to # SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed. 
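# Back-of-envelope check of the "144 epochs" comment above (illustrative
# arithmetic, not from the repo): batch size 64 = 32 GPUs x 2 samples/GPU,
# so 270_000 iters x 64 imgs/iter = 17.28M images seen; divided by the
# ~118k images of COCO train2017 that is ~146 passes, i.e. roughly 144
# epochs. The same arithmetic gives ~48 epochs for the 90k variants below.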
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) model = dict( backbone=dict(frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg), neck=dict(norm_cfg=norm_cfg), rpn_head=dict(num_convs=2), # leads to 0.1+ mAP roi_head=dict( bbox_head=dict( type='Shared4Conv1FCBBoxHead', conv_out_channels=256, norm_cfg=head_norm_cfg), mask_head=dict(norm_cfg=head_norm_cfg))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py ================================================ _base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py' # lr steps at [0.9, 0.95, 0.975] of the maximum iterations lr_config = dict( warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750]) # 90k iterations with batch_size 64 is roughly equivalent to 48 epochs runner = dict(type='IterBasedRunner', max_iters=90000) ================================================ FILE: DLTA_AI_app/mmdetection/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', # 270k iterations with batch_size 64 is roughly equivalent to 144 epochs '../common/ssj_scp_270k_coco_instance.py' ] norm_cfg = dict(type='SyncBN', requires_grad=True) # Use MMSyncBN that handles empty tensor in head. It can be changed to # SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed. head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) model = dict( backbone=dict(frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg), neck=dict(norm_cfg=norm_cfg), rpn_head=dict(num_convs=2), # leads to 0.1+ mAP roi_head=dict( bbox_head=dict( type='Shared4Conv1FCBBoxHead', conv_out_channels=256, norm_cfg=head_norm_cfg), mask_head=dict(norm_cfg=head_norm_cfg))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py ================================================ _base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py' # lr steps at [0.9, 0.95, 0.975] of the maximum iterations lr_config = dict( warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750]) # 90k iterations with batch_size 64 is roughly equivalent to 48 epochs runner = dict(type='IterBasedRunner', max_iters=90000) ================================================ FILE: DLTA_AI_app/mmdetection/configs/simple_copy_paste/metafile.yml ================================================ Collections: - Name: SimpleCopyPaste Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 32x A100 GPUs Architecture: - Softmax - RPN - Convolution - Dense Connections - FPN - ResNet - RoIAlign Paper: URL: https://arxiv.org/abs/2012.07177 Title: "Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation" README: configs/simple_copy_paste/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.25.0/mmdet/datasets/pipelines/transforms.py#L2762 Version: v2.25.0 Models: - Name: mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco In Collection: SimpleCopyPaste Config: configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py Metadata: Training Memory (GB): 7.2 Iterations: 270000 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 43.5 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 39.1
Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco_20220324_182940-33a100c5.pth - Name: mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco In Collection: SimpleCopyPaste Config: configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py Metadata: Training Memory (GB): 7.2 Iterations: 90000 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 43.3 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 39.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco_20220316_181409-f79c84c5.pth - Name: mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco In Collection: SimpleCopyPaste Config: configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py Metadata: Training Memory (GB): 7.2 Iterations: 270000 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 45.1 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 40.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco_20220324_201229-80ee90b7.pth - Name: mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco In Collection: SimpleCopyPaste Config: configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py Metadata: Training Memory (GB): 7.2 Iterations: 90000 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 43.8 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 39.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco_20220316_181307-6bc5726f.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py ================================================ _base_ = './decoupled_solo_r50_fpn_3x_coco.py' # model settings model = dict( mask_head=dict( type='DecoupledSOLOLightHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, strides=[8, 8, 16, 32, 32], scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)), pos_scale=0.2, num_grids=[40, 36, 24, 16, 12], cls_down_index=0, loss_mask=dict( type='DiceLoss', use_sigmoid=True, activate=False, loss_weight=3.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(852, 512), (852, 480), (852, 448), (852, 416), (852, 384), (852, 352)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(852, 512), flip=False, transforms=[
dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/solo/decoupled_solo_r50_fpn_1x_coco.py ================================================ _base_ = [ './solo_r50_fpn_1x_coco.py', ] # model settings model = dict( mask_head=dict( type='DecoupledSOLOHead', num_classes=80, in_channels=256, stacked_convs=7, feat_channels=256, strides=[8, 8, 16, 32, 32], scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), pos_scale=0.2, num_grids=[40, 36, 24, 16, 12], cls_down_index=0, loss_mask=dict( type='DiceLoss', use_sigmoid=True, activate=False, loss_weight=3.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) optimizer = dict(type='SGD', lr=0.01) ================================================ FILE: DLTA_AI_app/mmdetection/configs/solo/decoupled_solo_r50_fpn_3x_coco.py ================================================ _base_ = './solo_r50_fpn_3x_coco.py' # model settings model = dict( mask_head=dict( type='DecoupledSOLOHead', num_classes=80, in_channels=256, stacked_convs=7, feat_channels=256, strides=[8, 8, 16, 32, 32], scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), pos_scale=0.2, num_grids=[40, 36, 24, 16, 12], cls_down_index=0, loss_mask=dict( type='DiceLoss', use_sigmoid=True, activate=False, loss_weight=3.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/solo/metafile.yml ================================================ Collections: - Name: SOLO Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - FPN - Convolution - ResNet Paper: https://arxiv.org/abs/1912.04488 README: configs/solo/README.md Models: - Name: decoupled_solo_r50_fpn_1x_coco In Collection: SOLO Config: configs/solo/decoupled_solo_r50_fpn_1x_coco.py Metadata: Training Memory (GB): 7.8 Epochs: 12 inference time (ms/im): - value: 116.4 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (1333, 800) Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 33.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth - Name: decoupled_solo_r50_fpn_3x_coco In Collection: SOLO Config: configs/solo/decoupled_solo_r50_fpn_3x_coco.py Metadata: Training Memory (GB): 7.9 Epochs: 36 inference time (ms/im): - value: 117.2 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (1333, 800) Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 36.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_3x_coco/decoupled_solo_r50_fpn_3x_coco_20210821_042504-7b3301ec.pth - Name: decoupled_solo_light_r50_fpn_3x_coco In Collection: SOLO Config: configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py Metadata: Training Memory (GB): 2.2 Epochs: 36 inference time (ms/im): - value: 35.0 
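# (illustrative conversion, not from the metafile) 35.0 ms/im at batch
# size 1 corresponds to about 1000 / 35.0 = 28.6 images per second on the
# listed V100, reflecting the light model's reduced (852, 512) resolution.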
hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (852, 512) Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 32.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_light_r50_fpn_3x_coco/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703-e70e226f.pth - Name: solo_r50_fpn_3x_coco In Collection: SOLO Config: configs/solo/solo_r50_fpn_3x_coco.py Metadata: Training Memory (GB): 7.4 Epochs: 36 inference time (ms/im): - value: 94.2 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (1333, 800) Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 35.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_3x_coco/solo_r50_fpn_3x_coco_20210901_012353-11d224d7.pth - Name: solo_r50_fpn_1x_coco In Collection: SOLO Config: configs/solo/solo_r50_fpn_1x_coco.py Metadata: Training Memory (GB): 8.0 Epochs: 12 inference time (ms/im): - value: 95.1 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (1333, 800) Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 33.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_1x_coco/solo_r50_fpn_1x_coco_20210821_035055-2290a6b8.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/solo/solo_r50_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings model = dict( type='SOLO', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), style='pytorch'), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=0, num_outs=5), mask_head=dict( type='SOLOHead', num_classes=80, in_channels=256, stacked_convs=7, feat_channels=256, strides=[8, 8, 16, 32, 32], scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), pos_scale=0.2, num_grids=[40, 36, 24, 16, 12], cls_down_index=0, loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)), # model training and testing settings test_cfg=dict( nms_pre=500, score_thr=0.1, mask_thr=0.5, filter_thr=0.05, kernel='gaussian', # gaussian/linear sigma=2.0, max_per_img=100)) # optimizer optimizer = dict(type='SGD', lr=0.01) ================================================ FILE: DLTA_AI_app/mmdetection/configs/solo/solo_r50_fpn_3x_coco.py ================================================ _base_ = './solo_r50_fpn_1x_coco.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, 800), (1333, 768), (1333, 736), (1333, 704), (1333, 672), (1333, 640)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] data = dict(train=dict(pipeline=train_pipeline)) lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 3, step=[27, 33]) runner = 
dict(type='EpochBasedRunner', max_epochs=36) ================================================ FILE: DLTA_AI_app/mmdetection/configs/solov2/metafile.yml ================================================ Collections: - Name: SOLOv2 Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x A100 GPUs Architecture: - FPN - Convolution - ResNet Paper: https://arxiv.org/abs/2003.10152 README: configs/solov2/README.md Models: - Name: solov2_r50_fpn_1x_coco In Collection: SOLOv2 Config: configs/solov2/solov2_r50_fpn_1x_coco.py Metadata: Training Memory (GB): 5.1 Epochs: 12 Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 34.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth - Name: solov2_r50_fpn_3x_coco In Collection: SOLOv2 Config: configs/solov2/solov2_r50_fpn_3x_coco.py Metadata: Training Memory (GB): 5.1 Epochs: 36 Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 37.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_3x_coco/solov2_r50_fpn_3x_coco_20220512_125856-fed092d4.pth - Name: solov2_r101_fpn_3x_coco In Collection: SOLOv2 Config: configs/solov2/solov2_r101_fpn_3x_coco.py Metadata: Training Memory (GB): 6.9 Epochs: 36 Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 39.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_fpn_3x_coco/solov2_r101_fpn_3x_coco_20220511_095119-c559a076.pth - Name: solov2_r101_dcn_fpn_3x_coco In Collection: SOLOv2 Config: configs/solov2/solov2_r101_dcn_fpn_3x_coco.py Metadata: Training Memory (GB): 7.1 Epochs: 36 Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 41.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_dcn_fpn_3x_coco/solov2_r101_dcn_fpn_3x_coco_20220513_214734-16c966cb.pth - Name: solov2_x101_dcn_fpn_3x_coco In Collection: SOLOv2 Config: configs/solov2/solov2_x101_dcn_fpn_3x_coco.py Metadata: Training Memory (GB): 11.3 Epochs: 36 Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 42.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_x101_dcn_fpn_3x_coco/solov2_x101_dcn_fpn_3x_coco_20220513_214337-aef41095.pth - Name: solov2_light_r18_fpn_3x_coco In Collection: SOLOv2 Config: configs/solov2/solov2_light_r18_fpn_3x_coco.py Metadata: Training Memory (GB): 9.1 Epochs: 36 Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 29.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r18_fpn_3x_coco/solov2_light_r18_fpn_3x_coco_20220511_083717-75fa355b.pth - Name: solov2_light_r34_fpn_3x_coco In Collection: SOLOv2 Config: configs/solov2/solov2_light_r34_fpn_3x_coco.py Metadata: Training Memory (GB): 9.3 Epochs: 36 Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 31.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r34_fpn_3x_coco/solov2_light_r34_fpn_3x_coco_20220511_091839-e51659d3.pth - Name: solov2_light_r50_fpn_3x_coco In Collection: SOLOv2 Config: configs/solov2/solov2_light_r50_fpn_3x_coco.py Metadata: Training Memory (GB): 9.9 Epochs: 36 Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 33.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r50_fpn_3x_coco/solov2_light_r50_fpn_3x_coco_20220512_165256-c93a6074.pth ================================================ 
FILE: DLTA_AI_app/mmdetection/configs/solov2/solov2_light_r18_fpn_3x_coco.py ================================================ _base_ = 'solov2_light_r50_fpn_3x_coco.py' # model settings model = dict( backbone=dict( depth=18, init_cfg=dict(checkpoint='torchvision://resnet18')), neck=dict(in_channels=[64, 128, 256, 512])) ================================================ FILE: DLTA_AI_app/mmdetection/configs/solov2/solov2_light_r34_fpn_3x_coco.py ================================================ _base_ = 'solov2_light_r50_fpn_3x_coco.py' # model settings model = dict( backbone=dict( depth=34, init_cfg=dict(checkpoint='torchvision://resnet34')), neck=dict(in_channels=[64, 128, 256, 512])) ================================================ FILE: DLTA_AI_app/mmdetection/configs/solov2/solov2_light_r50_dcn_fpn_3x_coco.py ================================================ _base_ = 'solov2_r50_fpn_3x_coco.py' # model settings model = dict( backbone=dict( dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True)), mask_head=dict( feat_channels=256, stacked_convs=3, scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)), mask_feature_head=dict(out_channels=128), dcn_cfg=dict(type='DCNv2'), dcn_apply_to_all_conv=False)) # light solov2 head # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 3, step=[27, 33]) runner = dict(type='EpochBasedRunner', max_epochs=36) # data img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(768, 512), (768, 480), (768, 448), (768, 416), (768, 384), (768, 352)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(448, 768), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/solov2/solov2_light_r50_fpn_3x_coco.py ================================================ _base_ = 'solov2_r50_fpn_1x_coco.py' # model settings model = dict( mask_head=dict( stacked_convs=2, feat_channels=256, scale_ranges=((1, 56), (28, 112), (56, 224), (112, 448), (224, 896)), mask_feature_head=dict(out_channels=128))) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 3, step=[27, 33]) runner = dict(type='EpochBasedRunner', max_epochs=36) # data img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(768, 512), (768, 480), (768, 448), (768, 416), (768, 384), (768, 352)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), 
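# Note: multiscale_mode='value' in the Resize step above samples one of the listed
# scales per iteration ('range' would instead sample a size continuously between two
# extremes); the reduced ~768-px scales are part of what makes the light variants cheap.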
dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(448, 768), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/solov2/solov2_r101_dcn_fpn_3x_coco.py ================================================ _base_ = 'solov2_r50_fpn_3x_coco.py' # model settings model = dict( backbone=dict( depth=101, init_cfg=dict(checkpoint='torchvision://resnet101'), dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True)), mask_head=dict( mask_feature_head=dict(conv_cfg=dict(type='DCNv2')), dcn_cfg=dict(type='DCNv2'), dcn_apply_to_all_conv=True)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/solov2/solov2_r101_fpn_3x_coco.py ================================================ _base_ = 'solov2_r50_fpn_3x_coco.py' # model settings model = dict( backbone=dict( depth=101, init_cfg=dict(checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/solov2/solov2_r50_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings model = dict( type='SOLOv2', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), style='pytorch'), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=0, num_outs=5), mask_head=dict( type='SOLOV2Head', num_classes=80, in_channels=256, feat_channels=512, stacked_convs=4, strides=[8, 8, 16, 32, 32], scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), pos_scale=0.2, num_grids=[40, 36, 24, 16, 12], cls_down_index=0, mask_feature_head=dict( feat_channels=128, start_level=0, end_level=3, out_channels=256, mask_stride=4, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)), loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0)), # model training and testing settings test_cfg=dict( nms_pre=500, score_thr=0.1, mask_thr=0.5, filter_thr=0.05, kernel='gaussian', # gaussian/linear sigma=2.0, max_per_img=100)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/solov2/solov2_r50_fpn_3x_coco.py ================================================ _base_ = 'solov2_r50_fpn_1x_coco.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, 
800), (1333, 768), (1333, 736), (1333, 704), (1333, 672), (1333, 640)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] data = dict(train=dict(pipeline=train_pipeline)) lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 3, step=[27, 33]) runner = dict(type='EpochBasedRunner', max_epochs=36) ================================================ FILE: DLTA_AI_app/mmdetection/configs/solov2/solov2_x101_dcn_fpn_3x_coco.py ================================================ _base_ = 'solov2_r50_fpn_3x_coco.py' # model settings model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')), mask_head=dict( mask_feature_head=dict(conv_cfg=dict(type='DCNv2')), dcn_cfg=dict(type='DCNv2'), dcn_apply_to_all_conv=True)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/sparse_rcnn/metafile.yml ================================================ Collections: - Name: Sparse R-CNN Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - FPN - ResNet - Sparse R-CNN Paper: URL: https://arxiv.org/abs/2011.12450 Title: 'Sparse R-CNN: End-to-End Object Detection with Learnable Proposals' README: configs/sparse_rcnn/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.9.0/mmdet/models/detectors/sparse_rcnn.py#L6 Version: v2.9.0 Models: - Name: sparse_rcnn_r50_fpn_1x_coco In Collection: Sparse R-CNN Config: configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 37.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth - Name: sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco In Collection: Sparse R-CNN Config: configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py Metadata: Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.pth - Name: sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco In Collection: Sparse R-CNN Config: configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py Metadata: Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 45.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.pth - Name: sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco In Collection: Sparse R-CNN Config: configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py Metadata: Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 44.2 Weights: 
https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.pth - Name: sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco In Collection: Sparse R-CNN Config: configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py Metadata: Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py ================================================ _base_ = './sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py ================================================ _base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] num_stages = 6 num_proposals = 100 model = dict( type='SparseRCNN', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=0, add_extra_convs='on_input', num_outs=4), rpn_head=dict( type='EmbeddingRPNHead', num_proposals=num_proposals, proposal_feature_channel=256), roi_head=dict( type='SparseRoIHead', num_stages=num_stages, stage_loss_weights=[1] * num_stages, proposal_feature_channel=256, bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=[ dict( type='DIIHead', num_classes=80, num_ffn_fcs=2, num_heads=8, num_cls_fcs=1, num_reg_fcs=3, feedforward_channels=2048, in_channels=256, dropout=0.0, ffn_act_cfg=dict(type='ReLU', inplace=True), dynamic_conv_cfg=dict( type='DynamicConv', in_channels=256, feat_channels=64, out_channels=256, input_feat_shape=7, act_cfg=dict(type='ReLU', inplace=True), norm_cfg=dict(type='LN')), loss_bbox=dict(type='L1Loss', loss_weight=5.0), loss_iou=dict(type='GIoULoss', loss_weight=2.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0), bbox_coder=dict( type='DeltaXYWHBBoxCoder', clip_border=False, target_means=[0., 0., 0., 0.], target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages) ]), # training and testing settings train_cfg=dict( rpn=None, rcnn=[ dict( assigner=dict( type='HungarianAssigner', cls_cost=dict(type='FocalLossCost', weight=2.0), reg_cost=dict(type='BBoxL1Cost', 
weight=5.0), iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0)), sampler=dict(type='PseudoSampler'), pos_weight=1) for _ in range(num_stages) ]), test_cfg=dict(rpn=None, rcnn=dict(max_per_img=num_proposals))) # optimizer optimizer = dict(_delete_=True, type='AdamW', lr=0.000025, weight_decay=0.0001) optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=1, norm_type=2)) # learning policy lr_config = dict(policy='step', step=[8, 11]) runner = dict(type='EpochBasedRunner', max_epochs=12) ================================================ FILE: DLTA_AI_app/mmdetection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py ================================================ _base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py' num_proposals = 300 model = dict( rpn_head=dict(num_proposals=num_proposals), test_cfg=dict( _delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) # augmentation strategy originates from DETR. train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='RandomFlip', flip_ratio=0.5), dict( type='AutoAugment', policies=[[ dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', keep_ratio=True) ], [ dict( type='Resize', img_scale=[(400, 1333), (500, 1333), (600, 1333)], multiscale_mode='value', keep_ratio=True), dict( type='RandomCrop', crop_type='absolute_range', crop_size=(384, 600), allow_negative_crop=True), dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', override=True, keep_ratio=True) ]]), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] data = dict(train=dict(pipeline=train_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py ================================================ _base_ = './sparse_rcnn_r50_fpn_1x_coco.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) min_values = (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, value) for value in min_values], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] data = dict(train=dict(pipeline=train_pipeline)) lr_config = dict(policy='step', step=[27, 33]) runner = dict(type='EpochBasedRunner', max_epochs=36) ================================================ FILE: DLTA_AI_app/mmdetection/configs/ssd/metafile.yml ================================================ Collections: - Name: SSD Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - VGG Paper: URL: https://arxiv.org/abs/1512.02325 Title: 'SSD: Single Shot MultiBox Detector' README: 
configs/ssd/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.14.0/mmdet/models/dense_heads/ssd_head.py#L16 Version: v2.14.0 Models: - Name: ssd300_coco In Collection: SSD Config: configs/ssd/ssd300_coco.py Metadata: Training Memory (GB): 9.9 inference time (ms/im): - value: 22.88 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (300, 300) Epochs: 120 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 25.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20210803_015428-d231a06e.pth - Name: ssd512_coco In Collection: SSD Config: configs/ssd/ssd512_coco.py Metadata: Training Memory (GB): 19.4 inference time (ms/im): - value: 32.57 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512, 512) Epochs: 120 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 29.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd512_coco/ssd512_coco_20210803_022849-0a47a1ca.pth - Name: ssdlite_mobilenetv2_scratch_600e_coco In Collection: SSD Config: configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py Metadata: Training Memory (GB): 4.0 inference time (ms/im): - value: 14.3 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (320, 320) Epochs: 600 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 21.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/ssd/ssd300_coco.py ================================================ _base_ = [ '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict( type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(300, 300), keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(300, 300), flip=False, transforms=[ dict(type='Resize', keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=3, train=dict( _delete_=True, type='RepeatDataset', times=5, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4) optimizer_config = dict(_delete_=True) custom_hooks = [ dict(type='NumClassCheckHook'), dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') ] # NOTE: `auto_scale_lr` is for 
automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=64) ================================================ FILE: DLTA_AI_app/mmdetection/configs/ssd/ssd300_fp16_coco.py ================================================ _base_ = ['./ssd300_coco.py'] fp16 = dict(loss_scale='dynamic') # learning policy # In order to avoid non-convergence in the early stage of # mixed-precision training, the warmup in the lr_config is set to linear, # warmup_iters increases and warmup_ratio decreases. lr_config = dict(warmup='linear', warmup_iters=1000, warmup_ratio=1.0 / 10) ================================================ FILE: DLTA_AI_app/mmdetection/configs/ssd/ssd512_coco.py ================================================ _base_ = 'ssd300_coco.py' input_size = 512 model = dict( neck=dict( out_channels=(512, 1024, 512, 256, 256, 256, 256), level_strides=(2, 2, 2, 2, 1), level_paddings=(1, 1, 1, 1, 1), last_kernel_size=4), bbox_head=dict( in_channels=(512, 1024, 512, 256, 256, 256, 256), anchor_generator=dict( type='SSDAnchorGenerator', scale_major=False, input_size=input_size, basesize_ratio_range=(0.1, 0.9), strides=[8, 16, 32, 64, 128, 256, 512], ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]]))) # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict( type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(512, 512), keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(512, 512), flip=False, transforms=[ dict(type='Resize', keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=3, train=dict( _delete_=True, type='RepeatDataset', times=5, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4) optimizer_config = dict(_delete_=True) custom_hooks = [ dict(type='NumClassCheckHook'), dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') ] # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. 
# base_batch_size = (8 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=64) ================================================ FILE: DLTA_AI_app/mmdetection/configs/ssd/ssd512_fp16_coco.py ================================================ _base_ = ['./ssd512_coco.py'] # fp16 settings fp16 = dict(loss_scale='dynamic') # learning policy # In order to avoid non-convergence in the early stage of # mixed-precision training, the warmup in the lr_config is set to linear, # warmup_iters increases and warmup_ratio decreases. lr_config = dict(warmup='linear', warmup_iters=1000, warmup_ratio=1.0 / 10) ================================================ FILE: DLTA_AI_app/mmdetection/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py ================================================ _base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' ] model = dict( type='SingleStageDetector', backbone=dict( type='MobileNetV2', out_indices=(4, 7), norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), neck=dict( type='SSDNeck', in_channels=(96, 1280), out_channels=(96, 1280, 512, 256, 256, 128), level_strides=(2, 2, 2, 2), level_paddings=(1, 1, 1, 1), l2_norm_scale=None, use_depthwise=True, norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), act_cfg=dict(type='ReLU6'), init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), bbox_head=dict( type='SSDHead', in_channels=(96, 1280, 512, 256, 256, 128), num_classes=80, use_depthwise=True, norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), act_cfg=dict(type='ReLU6'), init_cfg=dict(type='Normal', layer='Conv2d', std=0.001), # set anchor size manually instead of using the predefined # SSD300 setting. anchor_generator=dict( type='SSDAnchorGenerator', scale_major=False, strides=[16, 32, 64, 107, 160, 320], ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]], min_sizes=[48, 100, 150, 202, 253, 304], max_sizes=[100, 150, 202, 253, 304, 320]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2])), # model training and testing settings train_cfg=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0., ignore_iof_thr=-1, gt_max_assign_all=False), smoothl1_beta=1., allowed_border=-1, pos_weight=-1, neg_pos_ratio=3, debug=False), test_cfg=dict( nms_pre=1000, nms=dict(type='nms', iou_threshold=0.45), min_bbox_size=0, score_thr=0.02, max_per_img=200)) cudnn_benchmark = True # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict( type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(320, 320), keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=320), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(320, 320), flip=False, transforms=[ dict(type='Resize', keep_ratio=False), 
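# keep_ratio=False resizes every image to exactly 320x320: SSD-style heads assume a
# fixed square input, so the aspect ratio is deliberately not preserved.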
dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=320), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=24, workers_per_gpu=4, train=dict( _delete_=True, type='RepeatDataset', # use RepeatDataset to speed up training times=5, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=0.015, momentum=0.9, weight_decay=4.0e-5) optimizer_config = dict(grad_clip=None) # learning policy lr_config = dict( policy='CosineAnnealing', warmup='linear', warmup_iters=500, warmup_ratio=0.001, min_lr=0) runner = dict(type='EpochBasedRunner', max_epochs=120) # Avoid evaluation and saving weights too frequently evaluation = dict(interval=5, metric='bbox') checkpoint_config = dict(interval=5) custom_hooks = [ dict(type='NumClassCheckHook'), dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') ] # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (24 samples per GPU) auto_scale_lr = dict(base_batch_size=192) ================================================ FILE: DLTA_AI_app/mmdetection/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../common/lsj_100e_coco_instance.py' ] norm_cfg = dict(type='SyncBN', requires_grad=True) # Use MMSyncBN that handles empty tensor in head. It can be changed to # SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed # Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205. 
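# Note: SyncBN/MMSyncBN only work under distributed data-parallel training; for
# single-GPU debugging these norm_cfg entries are usually switched back to type='BN'.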
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) model = dict( backbone=dict( frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None, style='caffe'), neck=dict(norm_cfg=norm_cfg), rpn_head=dict(num_convs=2), roi_head=dict( bbox_head=dict( type='Shared4Conv1FCBBoxHead', conv_out_channels=256, norm_cfg=head_norm_cfg), mask_head=dict(norm_cfg=head_norm_cfg))) file_client_args = dict(backend='disk') # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) image_size = (1024, 1024) train_pipeline = [ dict(type='LoadImageFromFile', file_client_args=file_client_args), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=image_size, ratio_range=(0.1, 2.0), multiscale_mode='range', keep_ratio=True), dict( type='RandomCrop', crop_type='absolute_range', crop_size=image_size, recompute_bbox=True, allow_negative_crop=True), dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size=image_size), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile', file_client_args=file_client_args), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] # Use RepeatDataset to speed up training data = dict( train=dict(dataset=dict(pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py ================================================ _base_ = 'mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' fp16 = dict(loss_scale=512.) ================================================ FILE: DLTA_AI_app/mmdetection/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py ================================================ _base_ = './mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' # Use RepeatDataset to speed up training # change repeat time from 4 (for 100 epochs) to 16 (for 400 epochs) data = dict(train=dict(times=4 * 4)) lr_config = dict(warmup_iters=500 * 4) ================================================ FILE: DLTA_AI_app/mmdetection/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../common/lsj_100e_coco_instance.py' ] norm_cfg = dict(type='SyncBN', requires_grad=True) # Use MMSyncBN that handles empty tensor in head. It can be changed to # SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed # Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205. 
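# Usage sketch (assuming the stock mmdet 2.x launcher; paths relative to the
# mmdetection root): SyncBN configs must be launched in distributed mode, e.g.
#   bash ./tools/dist_train.sh \
#       configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py 8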
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) model = dict( # the model is trained from scratch, so init_cfg is None backbone=dict( frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None), neck=dict(norm_cfg=norm_cfg), rpn_head=dict(num_convs=2), # leads to 0.1+ mAP roi_head=dict( bbox_head=dict( type='Shared4Conv1FCBBoxHead', conv_out_channels=256, norm_cfg=head_norm_cfg), mask_head=dict(norm_cfg=head_norm_cfg))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py ================================================ _base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' # use FP16 fp16 = dict(loss_scale=512.) ================================================ FILE: DLTA_AI_app/mmdetection/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py ================================================ _base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' # Use RepeatDataset to speed up training # change repeat time from 4 (for 100 epochs) to 2 (for 50 epochs) data = dict(train=dict(times=2)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py ================================================ _base_ = './mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py' pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa model = dict( backbone=dict( depths=[2, 2, 18, 2], init_cfg=dict(type='Pretrained', checkpoint=pretrained))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( type='MaskRCNN', backbone=dict( _delete_=True, type='SwinTransformer', embed_dims=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.2, patch_norm=True, out_indices=(0, 1, 2, 3), with_cp=False, convert_weights=True, init_cfg=dict(type='Pretrained', checkpoint=pretrained)), neck=dict(in_channels=[96, 192, 384, 768])) optimizer = dict( _delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, paramwise_cfg=dict( custom_keys={ 'absolute_pos_embed': dict(decay_mult=0.), 'relative_position_bias_table': dict(decay_mult=0.), 'norm': dict(decay_mult=0.) 
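# decay_mult=0. disables weight decay for the position-embedding and norm parameters,
# following the Swin Transformer training recipe.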
})) lr_config = dict(warmup_iters=1000, step=[8, 11]) runner = dict(max_epochs=12) ================================================ FILE: DLTA_AI_app/mmdetection/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py ================================================ _base_ = './mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py' # you need to set mode='dynamic' if you are using pytorch<=1.5.0 fp16 = dict(loss_scale=dict(init_scale=512)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py ================================================ _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( type='MaskRCNN', backbone=dict( _delete_=True, type='SwinTransformer', embed_dims=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.2, patch_norm=True, out_indices=(0, 1, 2, 3), with_cp=False, convert_weights=True, init_cfg=dict(type='Pretrained', checkpoint=pretrained)), neck=dict(in_channels=[96, 192, 384, 768])) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) # augmentation strategy originates from DETR / Sparse RCNN train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='RandomFlip', flip_ratio=0.5), dict( type='AutoAugment', policies=[[ dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', keep_ratio=True) ], [ dict( type='Resize', img_scale=[(400, 1333), (500, 1333), (600, 1333)], multiscale_mode='value', keep_ratio=True), dict( type='RandomCrop', crop_type='absolute_range', crop_size=(384, 600), allow_negative_crop=True), dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', override=True, keep_ratio=True) ]]), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] data = dict(train=dict(pipeline=train_pipeline)) optimizer = dict( _delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, paramwise_cfg=dict( custom_keys={ 'absolute_pos_embed': dict(decay_mult=0.), 'relative_position_bias_table': dict(decay_mult=0.), 'norm': dict(decay_mult=0.) 
})) lr_config = dict(warmup_iters=1000, step=[27, 33]) runner = dict(max_epochs=36) ================================================ FILE: DLTA_AI_app/mmdetection/configs/swin/metafile.yml ================================================ Models: - Name: mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco In Collection: Mask R-CNN Config: configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py Metadata: Training Memory (GB): 11.9 Epochs: 36 Training Data: COCO Training Techniques: - AdamW Training Resources: 8x V100 GPUs Architecture: - Swin Transformer Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 48.2 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 43.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco_20210903_104808-b92c91f1.pth Paper: URL: https://arxiv.org/abs/2103.14030 Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows' README: configs/swin/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 Version: v2.16.0 - Name: mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco In Collection: Mask R-CNN Config: configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py Metadata: Training Memory (GB): 10.2 Epochs: 36 Training Data: COCO Training Techniques: - AdamW Training Resources: 8x V100 GPUs Architecture: - Swin Transformer Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.0 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 41.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco_20210906_131725-bacf6f7b.pth Paper: URL: https://arxiv.org/abs/2103.14030 Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows' README: configs/swin/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 Version: v2.16.0 - Name: mask_rcnn_swin-t-p4-w7_fpn_1x_coco In Collection: Mask R-CNN Config: configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py Metadata: Training Memory (GB): 7.6 Epochs: 12 Training Data: COCO Training Techniques: - AdamW Training Resources: 8x V100 GPUs Architecture: - Swin Transformer Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.7 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 39.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth Paper: URL: https://arxiv.org/abs/2103.14030 Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows' README: configs/swin/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 Version: v2.16.0 - Name: mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco In Collection: Mask R-CNN Config: configs/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py Metadata: Training Memory (GB): 7.8 Epochs: 36 Training Data: COCO Training Techniques: - AdamW Training Resources: 8x V100 GPUs Architecture: - Swin Transformer Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.0 - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 41.7 Weights: 
https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_20210908_165006-90a4008c.pth Paper: URL: https://arxiv.org/abs/2103.14030 Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows' README: configs/swin/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 Version: v2.16.0 ================================================ FILE: DLTA_AI_app/mmdetection/configs/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( backbone=dict( _delete_=True, type='SwinTransformer', embed_dims=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.2, patch_norm=True, out_indices=(1, 2, 3), # Please only add indices that would be used # in FPN, otherwise some parameter will not be used with_cp=False, convert_weights=True, init_cfg=dict(type='Pretrained', checkpoint=pretrained)), neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5)) optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) ================================================ FILE: DLTA_AI_app/mmdetection/configs/timm_example/retinanet_timm_efficientnet_b1_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # please install mmcls>=0.20.0 # import mmcls.models to trigger register_module in mmcls custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) model = dict( backbone=dict( _delete_=True, type='mmcls.TIMMBackbone', model_name='efficientnet_b1', features_only=True, pretrained=True, out_indices=(1, 2, 3, 4)), neck=dict(in_channels=[24, 40, 112, 320])) optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) ================================================ FILE: DLTA_AI_app/mmdetection/configs/timm_example/retinanet_timm_tv_resnet50_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # please install mmcls>=0.20.0 # import mmcls.models to trigger register_module in mmcls custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) model = dict( backbone=dict( _delete_=True, type='mmcls.TIMMBackbone', model_name='tv_resnet50', # ResNet-50 with torchvision weights features_only=True, pretrained=True, out_indices=(1, 2, 3, 4))) optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) ================================================ FILE: DLTA_AI_app/mmdetection/configs/tood/metafile.yml ================================================ Collections: - Name: TOOD Metadata: Training Data: COCO Training Techniques: - SGD Training Resources: 8x V100 GPUs Architecture: - TOOD Paper: URL: https://arxiv.org/abs/2108.07755 Title: 'TOOD: Task-aligned One-stage Object Detection' README: configs/tood/README.md Code: 
URL: https://github.com/open-mmlab/mmdetection/blob/v2.20.0/mmdet/models/detectors/tood.py#L7 Version: v2.20.0 Models: - Name: tood_r101_fpn_mstrain_2x_coco In Collection: TOOD Config: configs/tood/tood_r101_fpn_mstrain_2x_coco.py Metadata: Training Memory (GB): 6.0 Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_mstrain_2x_coco/tood_r101_fpn_mstrain_2x_coco_20211210_144232-a18f53c8.pth - Name: tood_x101_64x4d_fpn_mstrain_2x_coco In Collection: TOOD Config: configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py Metadata: Training Memory (GB): 10.2 Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 47.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_x101_64x4d_fpn_mstrain_2x_coco/tood_x101_64x4d_fpn_mstrain_2x_coco_20211211_003519-a4f36113.pth - Name: tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco In Collection: TOOD Config: configs/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py Metadata: Training Memory (GB): 6.2 Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 49.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20211210_213728-4a824142.pth - Name: tood_r50_fpn_anchor_based_1x_coco In Collection: TOOD Config: configs/tood/tood_r50_fpn_anchor_based_1x_coco.py Metadata: Training Memory (GB): 4.1 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_anchor_based_1x_coco/tood_r50_fpn_anchor_based_1x_coco_20211214_100105-b776c134.pth - Name: tood_r50_fpn_1x_coco In Collection: TOOD Config: configs/tood/tood_r50_fpn_1x_coco.py Metadata: Training Memory (GB): 4.1 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth - Name: tood_r50_fpn_mstrain_2x_coco In Collection: TOOD Config: configs/tood/tood_r50_fpn_mstrain_2x_coco.py Metadata: Training Memory (GB): 4.1 Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 44.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_mstrain_2x_coco/tood_r50_fpn_mstrain_2x_coco_20211210_144231-3b23174c.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py ================================================ _base_ = './tood_r101_fpn_mstrain_2x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True)), bbox_head=dict(num_dcn=2)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/tood/tood_r101_fpn_mstrain_2x_coco.py ================================================ _base_ = './tood_r50_fpn_mstrain_2x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/tood/tood_r50_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='TOOD', backbone=dict( type='ResNet', depth=50, num_stages=4, 
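# out_indices below exposes all four ResNet stage outputs (C2-C5); the FPN neck
# with start_level=1 then consumes C3-C5.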
out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_output', num_outs=5), bbox_head=dict( type='TOODHead', num_classes=80, in_channels=256, stacked_convs=6, feat_channels=256, anchor_type='anchor_free', anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2]), initial_loss_cls=dict( type='FocalLoss', use_sigmoid=True, activated=True, # use probability instead of logit as input gamma=2.0, alpha=0.25, loss_weight=1.0), loss_cls=dict( type='QualityFocalLoss', use_sigmoid=True, activated=True, # use probability instead of logit as input beta=2.0, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=2.0)), train_cfg=dict( initial_epoch=4, initial_assigner=dict(type='ATSSAssigner', topk=9), assigner=dict(type='TaskAlignedAssigner', topk=13), alpha=1, beta=6, allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) # custom hooks custom_hooks = [dict(type='SetEpochInfoHook')] ================================================ FILE: DLTA_AI_app/mmdetection/configs/tood/tood_r50_fpn_anchor_based_1x_coco.py ================================================ _base_ = './tood_r50_fpn_1x_coco.py' model = dict(bbox_head=dict(anchor_type='anchor_based')) ================================================ FILE: DLTA_AI_app/mmdetection/configs/tood/tood_r50_fpn_mstrain_2x_coco.py ================================================ _base_ = './tood_r50_fpn_1x_coco.py' # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24) # multi-scale training img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 480), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] data = dict(train=dict(pipeline=train_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/tood/tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py ================================================ _base_ = './tood_x101_64x4d_fpn_mstrain_2x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), stage_with_dcn=(False, False, True, True), ), bbox_head=dict(num_dcn=2)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py ================================================ _base_ = './tood_r50_fpn_mstrain_2x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', 
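# init_cfg below loads ImageNet-pretrained ResNeXt-101-64x4d weights through
# MMCV's open-mmlab:// checkpoint prefix.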
init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/tridentnet/metafile.yml ================================================ Collections: - Name: TridentNet Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - ResNet - TridentNet Block Paper: URL: https://arxiv.org/abs/1901.01892 Title: 'Scale-Aware Trident Networks for Object Detection' README: configs/tridentnet/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.8.0/mmdet/models/detectors/trident_faster_rcnn.py#L6 Version: v2.8.0 Models: - Name: tridentnet_r50_caffe_1x_coco In Collection: TridentNet Config: configs/tridentnet/tridentnet_r50_caffe_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 37.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth - Name: tridentnet_r50_caffe_mstrain_1x_coco In Collection: TridentNet Config: configs/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 37.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco/tridentnet_r50_caffe_mstrain_1x_coco_20201230_141839-6ce55ccb.pth - Name: tridentnet_r50_caffe_mstrain_3x_coco In Collection: TridentNet Config: configs/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco.py Metadata: Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco/tridentnet_r50_caffe_mstrain_3x_coco_20201130_100539-46d227ba.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/tridentnet/tridentnet_r50_caffe_1x_coco.py ================================================ _base_ = [ '../_base_/models/faster_rcnn_r50_caffe_c4.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='TridentFasterRCNN', backbone=dict( type='TridentResNet', trident_dilations=(1, 2, 3), num_branch=3, test_branch_idx=1, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe')), roi_head=dict(type='TridentRoIHead', num_branch=3, test_branch_idx=1), train_cfg=dict( rpn_proposal=dict(max_per_img=500), rcnn=dict( sampler=dict(num=128, pos_fraction=0.5, add_gt_as_proposals=False)))) # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']) ]) ] data = dict( train=dict(pipeline=train_pipeline), 
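# val/test below reuse the deterministic single-scale test_pipeline defined above.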
val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco.py ================================================ _base_ = 'tridentnet_r50_caffe_1x_coco.py' # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] data = dict(train=dict(pipeline=train_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco.py ================================================ _base_ = 'tridentnet_r50_caffe_mstrain_1x_coco.py' lr_config = dict(step=[28, 34]) runner = dict(type='EpochBasedRunner', max_epochs=36) ================================================ FILE: DLTA_AI_app/mmdetection/configs/vfnet/metafile.yml ================================================ Collections: - Name: VFNet Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - FPN - ResNet - Varifocal Loss Paper: URL: https://arxiv.org/abs/2008.13367 Title: 'VarifocalNet: An IoU-aware Dense Object Detector' README: configs/vfnet/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.6.0/mmdet/models/detectors/vfnet.py#L6 Version: v2.6.0 Models: - Name: vfnet_r50_fpn_1x_coco In Collection: VFNet Config: configs/vfnet/vfnet_r50_fpn_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth - Name: vfnet_r50_fpn_mstrain_2x_coco In Collection: VFNet Config: configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 44.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco_20201027-7cc75bd2.pth - Name: vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco In Collection: VFNet Config: configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 48.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-6879c318.pth - Name: vfnet_r101_fpn_1x_coco In Collection: VFNet Config: configs/vfnet/vfnet_r101_fpn_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 43.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco_20201027pth-c831ece7.pth - Name: vfnet_r101_fpn_mstrain_2x_coco In Collection: VFNet Config: configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.7 Weights: 
https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco_20201027pth-4a5d53f1.pth - Name: vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco In Collection: VFNet Config: configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 49.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-7729adb5.pth - Name: vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco In Collection: VFNet Config: configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 50.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-d300a6fc.pth - Name: vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco In Collection: VFNet Config: configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py Metadata: Epochs: 24 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 50.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-b5f6da5e.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/vfnet/vfnet_r101_fpn_1x_coco.py ================================================ _base_ = './vfnet_r50_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/vfnet/vfnet_r101_fpn_2x_coco.py ================================================ _base_ = './vfnet_r50_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24) ================================================ FILE: DLTA_AI_app/mmdetection/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py ================================================ _base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py' model = dict( backbone=dict( type='ResNet', depth=101, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True), init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py ================================================ _base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/vfnet/vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco.py ================================================ _base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py' model = dict( backbone=dict( type='Res2Net', depth=101, scales=4, base_width=26, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, 
style='pytorch', dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/vfnet/vfnet_r2_101_fpn_mstrain_2x_coco.py ================================================ _base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' model = dict( backbone=dict( type='Res2Net', depth=101, scales=4, base_width=26, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/vfnet/vfnet_r50_fpn_1x_coco.py ================================================ _base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings model = dict( type='VFNet', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_output', # use P5 num_outs=5, relu_before_extra_convs=True), bbox_head=dict( type='VFNetHead', num_classes=80, in_channels=256, stacked_convs=3, feat_channels=256, strides=[8, 16, 32, 64, 128], center_sampling=False, dcn_on_last_conv=False, use_atss=True, use_vfl=True, loss_cls=dict( type='VarifocalLoss', use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.5), loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0)), # training and testing settings train_cfg=dict( assigner=dict(type='ATSSAssigner', topk=9), allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) # data setting dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict( lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) optimizer_config = dict(grad_clip=None) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.1, step=[8, 11]) runner = dict(type='EpochBasedRunner', max_epochs=12) 
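An editorial aside, not part of the repository: the short vfnet variant files in this directory compose through `_base_` inheritance, with mmcv merging each child file onto its parent dict-by-dict, so a child lists only the keys it changes. A minimal sketch of inspecting that merge, assuming an mmcv 1.x install and the mmdetection repo root as the working directory (both are assumptions):

from mmcv import Config

# Load the full base config and a child that only overrides two backbone keys.
base = Config.fromfile('configs/vfnet/vfnet_r50_fpn_1x_coco.py')
child = Config.fromfile('configs/vfnet/vfnet_r101_fpn_1x_coco.py')

assert base.model.backbone.depth == 50
assert child.model.backbone.depth == 101
# Keys the child does not mention are inherited unchanged from the base.
assert child.model.bbox_head.type == 'VFNetHead'
assert child.optimizer.lr == base.optimizer.lr == 0.01

Because the merge is per-key, `backbone=dict(depth=101, init_cfg=...)` rewrites only those two entries inside `model.backbone` rather than replacing the whole dict; `_delete_=True`, used by some configs later in this dump, is the escape hatch when a full replacement is intended.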
================================================ FILE: DLTA_AI_app/mmdetection/configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py ================================================ _base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True)), bbox_head=dict(dcn_on_last_conv=True)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py ================================================ _base_ = './vfnet_r50_fpn_1x_coco.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 480), (1333, 960)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24) ================================================ FILE: DLTA_AI_app/mmdetection/configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py ================================================ _base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/vfnet/vfnet_x101_32x4d_fpn_mstrain_2x_coco.py ================================================ _base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py ================================================ _base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) 
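A brief editorial note on the `mdconv_c3-c5` naming above: `stage_with_dcn` carries one flag per ResNet stage (res2 through res5, i.e. feature levels C2 to C5), and these variants enable modulated deformable convolution (DCNv2) on C3 to C5 only, leaving C2 untouched. A tiny pure-Python illustration; the stage labels are descriptive strings, not mmdet identifiers:

# Mirror of the tuple used in the configs above; index i corresponds to
# ResNet stage res(i+2), commonly called C(i+2).
stage_with_dcn = (False, True, True, True)
for stage, use_dcn in zip(('C2', 'C3', 'C4', 'C5'), stage_with_dcn):
    kind = 'modulated deformable conv (DCNv2)' if use_dcn else 'regular conv'
    print(f'{stage}: {kind}')
# C2: regular conv
# C3: modulated deformable conv (DCNv2)
# C4: modulated deformable conv (DCNv2)
# C5: modulated deformable conv (DCNv2)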
================================================ FILE: DLTA_AI_app/mmdetection/configs/vfnet/vfnet_x101_64x4d_fpn_mstrain_2x_coco.py ================================================ _base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) ================================================ FILE: DLTA_AI_app/mmdetection/configs/wider_face/ssd300_wider_face.py ================================================ _base_ = [ '../_base_/models/ssd300.py', '../_base_/datasets/wider_face.py', '../_base_/default_runtime.py' ] model = dict(bbox_head=dict(num_classes=1)) # optimizer optimizer = dict(type='SGD', lr=0.012, momentum=0.9, weight_decay=5e-4) optimizer_config = dict() # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=1000, warmup_ratio=0.001, step=[16, 20]) # runtime settings runner = dict(type='EpochBasedRunner', max_epochs=24) log_config = dict(interval=1) ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolact/metafile.yml ================================================ Collections: - Name: YOLACT Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - FPN - ResNet Paper: URL: https://arxiv.org/abs/1904.02689 Title: 'YOLACT: Real-time Instance Segmentation' README: configs/yolact/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.5.0/mmdet/models/detectors/yolact.py#L9 Version: v2.5.0 Models: - Name: yolact_r50_1x8_coco In Collection: YOLACT Config: configs/yolact/yolact_r50_1x8_coco.py Metadata: Training Resources: 1x V100 GPU Batch Size: 8 inference time (ms/im): - value: 23.53 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (550, 550) Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 29.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth - Name: yolact_r50_8x8_coco In Collection: YOLACT Config: configs/yolact/yolact_r50_8x8_coco.py Metadata: Batch Size: 64 inference time (ms/im): - value: 23.53 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (550, 550) Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 28.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_8x8_coco/yolact_r50_8x8_coco_20200908-ca34f5db.pth - Name: yolact_r101_1x8_coco In Collection: YOLACT Config: configs/yolact/yolact_r101_1x8_coco.py Metadata: Training Resources: 1x V100 GPU Batch Size: 8 inference time (ms/im): - value: 29.85 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (550, 550) Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 30.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r101_1x8_coco/yolact_r101_1x8_coco_20200908-4cbe9101.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolact/yolact_r101_1x8_coco.py ================================================ _base_ = './yolact_r50_1x8_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) ================================================ FILE: 
DLTA_AI_app/mmdetection/configs/yolact/yolact_r50_1x8_coco.py ================================================ _base_ = '../_base_/default_runtime.py' # model settings img_size = 550 model = dict( type='YOLACT', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=-1, # do not freeze stem norm_cfg=dict(type='BN', requires_grad=True), norm_eval=False, # update the statistics of bn zero_init_residual=False, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_input', num_outs=5, upsample_cfg=dict(mode='bilinear')), bbox_head=dict( type='YOLACTHead', num_classes=80, in_channels=256, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', octave_base_scale=3, scales_per_octave=1, base_sizes=[8, 16, 32, 64, 128], ratios=[0.5, 1.0, 2.0], strides=[550.0 / x for x in [69, 35, 18, 9, 5]], centers=[(550 * 0.5 / x, 550 * 0.5 / x) for x in [69, 35, 18, 9, 5]]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, reduction='none', loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5), num_head_convs=1, num_protos=32, use_ohem=True), mask_head=dict( type='YOLACTProtonet', in_channels=256, num_protos=32, num_classes=80, max_masks_to_train=100, loss_mask_weight=6.125), segm_head=dict( type='YOLACTSegmHead', num_classes=80, in_channels=256, loss_segm=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), # training and testing settings train_cfg=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0., ignore_iof_thr=-1, gt_max_assign_all=False), # smoothl1_beta=1., allowed_border=-1, pos_weight=-1, neg_pos_ratio=3, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, iou_thr=0.5, top_k=200, max_per_img=100)) # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.68, 116.78, 103.94], std=[58.40, 57.12, 57.38], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='FilterAnnotations', min_gt_bbox_wh=(4.0, 4.0)), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict( type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(img_size, img_size), keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(img_size, img_size), flip=False, transforms=[ dict(type='Resize', keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=4, train=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', 
img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4) optimizer_config = dict() # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.1, step=[20, 42, 49, 52]) runner = dict(type='EpochBasedRunner', max_epochs=55) cudnn_benchmark = True evaluation = dict(metric=['bbox', 'segm']) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (1 GPU) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=8) ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolact/yolact_r50_8x8_coco.py ================================================ _base_ = 'yolact_r50_1x8_coco.py' optimizer = dict(type='SGD', lr=8e-3, momentum=0.9, weight_decay=5e-4) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=1000, warmup_ratio=0.1, step=[20, 42, 49, 52]) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=64) ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolo/metafile.yml ================================================ Collections: - Name: YOLOv3 Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - DarkNet Paper: URL: https://arxiv.org/abs/1804.02767 Title: 'YOLOv3: An Incremental Improvement' README: configs/yolo/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.4.0/mmdet/models/detectors/yolo.py#L8 Version: v2.4.0 Models: - Name: yolov3_d53_320_273e_coco In Collection: YOLOv3 Config: configs/yolo/yolov3_d53_320_273e_coco.py Metadata: Training Memory (GB): 2.7 inference time (ms/im): - value: 15.65 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (320, 320) Epochs: 273 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 27.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-421362b6.pth - Name: yolov3_d53_mstrain-416_273e_coco In Collection: YOLOv3 Config: configs/yolo/yolov3_d53_mstrain-416_273e_coco.py Metadata: Training Memory (GB): 3.8 inference time (ms/im): - value: 16.34 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (416, 416) Epochs: 273 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 30.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-416_273e_coco/yolov3_d53_mstrain-416_273e_coco-2b60fcd9.pth - Name: yolov3_d53_mstrain-608_273e_coco In Collection: YOLOv3 Config: configs/yolo/yolov3_d53_mstrain-608_273e_coco.py Metadata: Training Memory (GB): 7.4 inference time (ms/im): - value: 20.79 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (608, 608) Epochs: 273 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 33.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-608_273e_coco/yolov3_d53_mstrain-608_273e_coco_20210518_115020-a2c3acb8.pth - Name: yolov3_d53_fp16_mstrain-608_273e_coco In Collection: YOLOv3 Config:
configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py Metadata: Training Memory (GB): 4.7 inference time (ms/im): - value: 20.79 hardware: V100 backend: PyTorch batch size: 1 mode: FP16 resolution: (608, 608) Epochs: 273 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 33.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_fp16_mstrain-608_273e_coco/yolov3_d53_fp16_mstrain-608_273e_coco_20210517_213542-4bc34944.pth - Name: yolov3_mobilenetv2_320_300e_coco In Collection: YOLOv3 Config: configs/yolo/yolov3_mobilenetv2_320_300e_coco.py Metadata: Training Memory (GB): 3.2 Epochs: 300 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 22.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_320_300e_coco/yolov3_mobilenetv2_320_300e_coco_20210719_215349-d18dff72.pth - Name: yolov3_mobilenetv2_mstrain-416_300e_coco In Collection: YOLOv3 Config: configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py Metadata: Training Memory (GB): 5.3 Epochs: 300 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 23.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco/yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolo/yolov3_d53_320_273e_coco.py ================================================ _base_ = './yolov3_d53_mstrain-608_273e_coco.py' # dataset settings img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 2)), dict( type='MinIoURandomCrop', min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(320, 320), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='PhotoMetricDistortion'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(320, 320), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']) ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py ================================================ _base_ = './yolov3_d53_mstrain-608_273e_coco.py' # fp16 settings fp16 = dict(loss_scale='dynamic') ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py ================================================ _base_ = './yolov3_d53_mstrain-608_273e_coco.py' # dataset settings img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 2)), dict( type='MinIoURandomCrop', min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), min_crop_size=0.3), 
dict(type='Resize', img_scale=[(320, 320), (416, 416)], keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='PhotoMetricDistortion'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(416, 416), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']) ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py ================================================ _base_ = '../_base_/default_runtime.py' # model settings model = dict( type='YOLOV3', backbone=dict( type='Darknet', depth=53, out_indices=(3, 4, 5), init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://darknet53')), neck=dict( type='YOLOV3Neck', num_scales=3, in_channels=[1024, 512, 256], out_channels=[512, 256, 128]), bbox_head=dict( type='YOLOV3Head', num_classes=80, in_channels=[512, 256, 128], out_channels=[1024, 512, 256], anchor_generator=dict( type='YOLOAnchorGenerator', base_sizes=[[(116, 90), (156, 198), (373, 326)], [(30, 61), (62, 45), (59, 119)], [(10, 13), (16, 30), (33, 23)]], strides=[32, 16, 8]), bbox_coder=dict(type='YOLOBBoxCoder'), featmap_strides=[32, 16, 8], loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0, reduction='sum'), loss_conf=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0, reduction='sum'), loss_xy=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=2.0, reduction='sum'), loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')), # training and testing settings train_cfg=dict( assigner=dict( type='GridAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0)), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, conf_thr=0.005, nms=dict(type='nms', iou_threshold=0.45), max_per_img=100)) # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 2)), dict( type='MinIoURandomCrop', min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=[(320, 320), (608, 608)], keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='PhotoMetricDistortion'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(608, 608), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']) ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=4, train=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', 
img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=2000, # same as burn-in in darknet warmup_ratio=0.1, step=[218, 246]) # runtime settings runner = dict(type='EpochBasedRunner', max_epochs=273) evaluation = dict(interval=1, metric=['bbox']) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=64) ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolo/yolov3_mobilenetv2_320_300e_coco.py ================================================ _base_ = ['./yolov3_mobilenetv2_mstrain-416_300e_coco.py'] # yapf:disable model = dict( bbox_head=dict( anchor_generator=dict( base_sizes=[[(220, 125), (128, 222), (264, 266)], [(35, 87), (102, 96), (60, 170)], [(10, 15), (24, 36), (72, 42)]]))) # yapf:enable # dataset settings img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 2)), dict( type='MinIoURandomCrop', min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(320, 320), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='PhotoMetricDistortion'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(320, 320), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']) ]) ] data = dict( train=dict(dataset=dict(pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py ================================================ _base_ = '../_base_/default_runtime.py' # model settings model = dict( type='YOLOV3', backbone=dict( type='MobileNetV2', out_indices=(2, 4, 6), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://mmdet/mobilenet_v2')), neck=dict( type='YOLOV3Neck', num_scales=3, in_channels=[320, 96, 32], out_channels=[96, 96, 96]), bbox_head=dict( type='YOLOV3Head', num_classes=80, in_channels=[96, 96, 96], out_channels=[96, 96, 96], anchor_generator=dict( type='YOLOAnchorGenerator', base_sizes=[[(116, 90), (156, 198), (373, 326)], [(30, 61), (62, 45), (59, 119)], [(10, 13), (16, 30), (33, 23)]], strides=[32, 16, 8]), bbox_coder=dict(type='YOLOBBoxCoder'), featmap_strides=[32, 16, 8], loss_cls=dict( type='CrossEntropyLoss', 
use_sigmoid=True, loss_weight=1.0, reduction='sum'), loss_conf=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0, reduction='sum'), loss_xy=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=2.0, reduction='sum'), loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')), # training and testing settings train_cfg=dict( assigner=dict( type='GridAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0)), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, conf_thr=0.005, nms=dict(type='nms', iou_threshold=0.45), max_per_img=100)) # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 2)), dict( type='MinIoURandomCrop', min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), min_crop_size=0.3), dict( type='Resize', img_scale=[(320, 320), (416, 416)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='PhotoMetricDistortion'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(416, 416), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']) ]) ] data = dict( samples_per_gpu=24, workers_per_gpu=4, train=dict( type='RepeatDataset', # use RepeatDataset to speed up training times=10, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline)), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0005) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=4000, warmup_ratio=0.0001, step=[24, 28]) # runtime settings runner = dict(type='EpochBasedRunner', max_epochs=30) evaluation = dict(interval=1, metric=['bbox']) find_unused_parameters = True # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. 
# base_batch_size = (8 GPUs) x (24 samples per GPU) auto_scale_lr = dict(base_batch_size=192) ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolof/metafile.yml ================================================ Collections: - Name: YOLOF Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - Dilated Encoder - ResNet Paper: URL: https://arxiv.org/abs/2103.09460 Title: 'You Only Look One-level Feature' README: configs/yolof/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/detectors/yolof.py#L6 Version: v2.12.0 Models: - Name: yolof_r50_c5_8x8_1x_coco In Collection: YOLOF Config: configs/yolof/yolof_r50_c5_8x8_1x_coco.py Metadata: Training Memory (GB): 8.3 Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 37.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolof/yolof_r50_c5_8x8_1x_coco.py ================================================ _base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='YOLOF', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(3, ), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron/resnet50_caffe')), neck=dict( type='DilatedEncoder', in_channels=2048, out_channels=512, block_mid_channels=128, num_residual_blocks=4, block_dilations=[2, 4, 6, 8]), bbox_head=dict( type='YOLOFHead', num_classes=80, in_channels=512, reg_decoded_bbox=True, anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[1, 2, 4, 8, 16], strides=[32]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1., 1., 1., 1.], add_ctr_clamp=True, ctr_clamp=32), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.0)), # training and testing settings train_cfg=dict( assigner=dict( type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7), allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) # optimizer optimizer = dict( type='SGD', lr=0.12, momentum=0.9, weight_decay=0.0001, paramwise_cfg=dict( norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. 
/ 3)})) lr_config = dict(warmup_iters=1500, warmup_ratio=0.00066667) # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='RandomShift', shift_ratio=0.5, max_shift_px=32), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=8, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=64) ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolof/yolof_r50_c5_8x8_iter-1x_coco.py ================================================ _base_ = './yolof_r50_c5_8x8_1x_coco.py' # This iter-based config follows the original source code. # The COCO dataset has 117266 images after filtering. With 8 GPUs and # a batch size of 8 per GPU, 22500 iterations are equivalent to # 22500/(117266/(8x8)) = 12.3 epochs, 15000 to 8.2 epochs, and 20000 to # 10.9 epochs. Because the lr (0.12) is large, the iter-based and # epoch-based settings differ by about 0.2 mAP.
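# (Editorial check of the arithmetic above, added as comments so the config
# itself is unchanged; the figures come from the comment, not from new runs:
#   117266 / (8 * 8) ~= 1832.3 iterations per epoch
#   22500 / 1832.3   ~= 12.3 epochs  -> max_iters below
#   15000 / 1832.3   ~= 8.2 epochs   -> first lr step
#   20000 / 1832.3   ~= 10.9 epochs  -> second lr step)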
lr_config = dict(step=[15000, 20000]) runner = dict(_delete_=True, type='IterBasedRunner', max_iters=22500) checkpoint_config = dict(interval=2500) evaluation = dict(interval=4500) log_config = dict(interval=20) ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolox/metafile.yml ================================================ Collections: - Name: YOLOX Metadata: Training Data: COCO Training Techniques: - SGD with Nesterov - Weight Decay - Cosine Annealing Lr Updater Training Resources: 8x TITANXp GPUs Architecture: - CSPDarkNet - PAFPN Paper: URL: https://arxiv.org/abs/2107.08430 Title: 'YOLOX: Exceeding YOLO Series in 2021' README: configs/yolox/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.15.1/mmdet/models/detectors/yolox.py#L6 Version: v2.15.1 Models: - Name: yolox_s_8x8_300e_coco In Collection: YOLOX Config: configs/yolox/yolox_s_8x8_300e_coco.py Metadata: Training Memory (GB): 7.6 Epochs: 300 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth - Name: yolox_l_8x8_300e_coco In Collection: YOLOX Config: configs/yolox/yolox_l_8x8_300e_coco.py Metadata: Training Memory (GB): 19.9 Epochs: 300 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 49.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth - Name: yolox_x_8x8_300e_coco In Collection: YOLOX Config: configs/yolox/yolox_x_8x8_300e_coco.py Metadata: Training Memory (GB): 28.1 Epochs: 300 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 50.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth - Name: yolox_tiny_8x8_300e_coco In Collection: YOLOX Config: configs/yolox/yolox_tiny_8x8_300e_coco.py Metadata: Training Memory (GB): 3.5 Epochs: 300 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 32.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolox/yolox_l_8x8_300e_coco.py ================================================ _base_ = './yolox_s_8x8_300e_coco.py' # model settings model = dict( backbone=dict(deepen_factor=1.0, widen_factor=1.0), neck=dict( in_channels=[256, 512, 1024], out_channels=256, num_csp_blocks=3), bbox_head=dict(in_channels=256, feat_channels=256)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolox/yolox_m_8x8_300e_coco.py ================================================ _base_ = './yolox_s_8x8_300e_coco.py' # model settings model = dict( backbone=dict(deepen_factor=0.67, widen_factor=0.75), neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2), bbox_head=dict(in_channels=192, feat_channels=192), ) ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolox/yolox_nano_8x8_300e_coco.py ================================================ _base_ = './yolox_tiny_8x8_300e_coco.py' # model settings model = dict( backbone=dict(deepen_factor=0.33, widen_factor=0.25, use_depthwise=True), neck=dict( in_channels=[64, 128, 256], out_channels=64, num_csp_blocks=1, use_depthwise=True), bbox_head=dict(in_channels=64, feat_channels=64, 
use_depthwise=True)) ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolox/yolox_s_8x8_300e_coco.py ================================================ _base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'] img_scale = (640, 640) # height, width # model settings model = dict( type='YOLOX', input_size=img_scale, random_size_range=(15, 25), random_size_interval=10, backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5), neck=dict( type='YOLOXPAFPN', in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1), bbox_head=dict( type='YOLOXHead', num_classes=80, in_channels=128, feat_channels=128), train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)), # In order to align the source code, the threshold of the val phase is # 0.01, and the threshold of the test phase is 0.001. test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65))) # dataset settings data_root = 'data/coco/' dataset_type = 'CocoDataset' train_pipeline = [ dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), dict( type='RandomAffine', scaling_ratio_range=(0.1, 2), border=(-img_scale[0] // 2, -img_scale[1] // 2)), dict( type='MixUp', img_scale=img_scale, ratio_range=(0.8, 1.6), pad_val=114.0), dict(type='YOLOXHSVRandomAug'), dict(type='RandomFlip', flip_ratio=0.5), # According to the official implementation, multi-scale # training is not considered here but in the # 'mmdet/models/detectors/yolox.py'. dict(type='Resize', img_scale=img_scale, keep_ratio=True), dict( type='Pad', pad_to_square=True, # If the image is three-channel, the pad value needs # to be set separately for each channel. pad_val=dict(img=(114.0, 114.0, 114.0))), dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] train_dataset = dict( type='MultiImageMixDataset', dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=[ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True) ], filter_empty_gt=False, ), pipeline=train_pipeline) test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=img_scale, flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict( type='Pad', pad_to_square=True, pad_val=dict(img=(114.0, 114.0, 114.0))), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']) ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=4, persistent_workers=True, train=train_dataset, val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) # optimizer # default 8 gpu optimizer = dict( type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4, nesterov=True, paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.)) optimizer_config = dict(grad_clip=None) max_epochs = 300 num_last_epochs = 15 resume_from = None interval = 10 # learning policy lr_config = dict( _delete_=True, policy='YOLOX', warmup='exp', by_epoch=False, warmup_by_epoch=True, warmup_ratio=1, warmup_iters=5, # 5 epoch num_last_epochs=num_last_epochs, min_lr_ratio=0.05) runner = dict(type='EpochBasedRunner', max_epochs=max_epochs) custom_hooks = [ dict( 
type='YOLOXModeSwitchHook', num_last_epochs=num_last_epochs, priority=48), dict( type='SyncNormHook', num_last_epochs=num_last_epochs, interval=interval, priority=48), dict( type='ExpMomentumEMAHook', resume_from=resume_from, momentum=0.0001, priority=49) ] checkpoint_config = dict(interval=interval) evaluation = dict( save_best='auto', # The evaluation interval is 'interval' when running epoch is # less than ‘max_epochs - num_last_epochs’. # The evaluation interval is 1 when running epoch is greater than # or equal to ‘max_epochs - num_last_epochs’. interval=interval, dynamic_intervals=[(max_epochs - num_last_epochs, 1)], metric='bbox') log_config = dict(interval=50) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=64) ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolox/yolox_tiny_8x8_300e_coco.py ================================================ _base_ = './yolox_s_8x8_300e_coco.py' # model settings model = dict( random_size_range=(10, 20), backbone=dict(deepen_factor=0.33, widen_factor=0.375), neck=dict(in_channels=[96, 192, 384], out_channels=96), bbox_head=dict(in_channels=96, feat_channels=96)) img_scale = (640, 640) # height, width train_pipeline = [ dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), dict( type='RandomAffine', scaling_ratio_range=(0.5, 1.5), border=(-img_scale[0] // 2, -img_scale[1] // 2)), dict(type='YOLOXHSVRandomAug'), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Resize', img_scale=img_scale, keep_ratio=True), dict( type='Pad', pad_to_square=True, pad_val=dict(img=(114.0, 114.0, 114.0))), dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(416, 416), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict( type='Pad', pad_to_square=True, pad_val=dict(img=(114.0, 114.0, 114.0))), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']) ]) ] train_dataset = dict(pipeline=train_pipeline) data = dict( train=train_dataset, val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=64) ================================================ FILE: DLTA_AI_app/mmdetection/configs/yolox/yolox_x_8x8_300e_coco.py ================================================ _base_ = './yolox_s_8x8_300e_coco.py' # model settings model = dict( backbone=dict(deepen_factor=1.33, widen_factor=1.25), neck=dict( in_channels=[320, 640, 1280], out_channels=320, num_csp_blocks=4), bbox_head=dict(in_channels=320, feat_channels=320)) ================================================ FILE: DLTA_AI_app/mmdetection/docs/en/Makefile ================================================ # Minimal makefile for Sphinx documentation # # You can set these variables from the command line, and also # from the environment for the first two. SPHINXOPTS ?= SPHINXBUILD ?= sphinx-build SOURCEDIR = . BUILDDIR = _build # Put it first so that "make" without argument is like "make help". 
help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) ================================================ FILE: DLTA_AI_app/mmdetection/docs/en/_static/css/readthedocs.css ================================================ .header-logo { background-image: url("../image/mmdet-logo.png"); background-size: 156px 40px; height: 40px; width: 156px; } ================================================ FILE: DLTA_AI_app/mmdetection/docs/en/api.rst ================================================ mmdet.apis -------------- .. automodule:: mmdet.apis :members: mmdet.core -------------- anchor ^^^^^^^^^^ .. automodule:: mmdet.core.anchor :members: bbox ^^^^^^^^^^ .. automodule:: mmdet.core.bbox :members: export ^^^^^^^^^^ .. automodule:: mmdet.core.export :members: mask ^^^^^^^^^^ .. automodule:: mmdet.core.mask :members: evaluation ^^^^^^^^^^ .. automodule:: mmdet.core.evaluation :members: post_processing ^^^^^^^^^^^^^^^ .. automodule:: mmdet.core.post_processing :members: utils ^^^^^^^^^^ .. automodule:: mmdet.core.utils :members: mmdet.datasets -------------- datasets ^^^^^^^^^^ .. automodule:: mmdet.datasets :members: pipelines ^^^^^^^^^^ .. automodule:: mmdet.datasets.pipelines :members: samplers ^^^^^^^^^^ .. automodule:: mmdet.datasets.samplers :members: api_wrappers ^^^^^^^^^^^^ .. automodule:: mmdet.datasets.api_wrappers :members: mmdet.models -------------- detectors ^^^^^^^^^^ .. automodule:: mmdet.models.detectors :members: backbones ^^^^^^^^^^ .. automodule:: mmdet.models.backbones :members: necks ^^^^^^^^^^^^ .. automodule:: mmdet.models.necks :members: dense_heads ^^^^^^^^^^^^ .. automodule:: mmdet.models.dense_heads :members: roi_heads ^^^^^^^^^^ .. automodule:: mmdet.models.roi_heads :members: losses ^^^^^^^^^^ .. automodule:: mmdet.models.losses :members: utils ^^^^^^^^^^ .. automodule:: mmdet.models.utils :members: mmdet.utils -------------- .. automodule:: mmdet.utils :members: ================================================ FILE: DLTA_AI_app/mmdetection/docs/en/conf.py ================================================ # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import subprocess import sys import pytorch_sphinx_theme sys.path.insert(0, os.path.abspath('../..')) # -- Project information ----------------------------------------------------- project = 'MMDetection' copyright = '2018-2021, OpenMMLab' author = 'MMDetection Authors' version_file = '../../mmdet/version.py' def get_version(): with open(version_file, 'r') as f: exec(compile(f.read(), version_file, 'exec')) return locals()['__version__'] # The full version, including alpha/beta/rc tags release = get_version() # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings.
They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', 'myst_parser', 'sphinx_markdown_tables', 'sphinx_copybutton', ] myst_enable_extensions = ['colon_fence'] myst_heading_anchors = 3 autodoc_mock_imports = [ 'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffixes as a list of strings: # source_suffix = { '.rst': 'restructuredtext', '.md': 'markdown', } # The master toctree document. master_doc = 'index' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # # html_theme = 'sphinx_rtd_theme' html_theme = 'pytorch_sphinx_theme' html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] html_theme_options = { 'menu': [ { 'name': 'GitHub', 'url': 'https://github.com/open-mmlab/mmdetection' }, ], # Specify the language of the shared menu 'menu_lang': 'en' } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_css_files = ['css/readthedocs.css'] # -- Extension configuration ------------------------------------------------- # Ignore >>> when copying code copybutton_prompt_text = r'>>> |\.\.\. ' copybutton_prompt_is_regexp = True def builder_inited_handler(app): subprocess.run(['./stat.py']) def setup(app): app.connect('builder-inited', builder_inited_handler) ================================================ FILE: DLTA_AI_app/mmdetection/docs/en/index.rst ================================================ Welcome to MMDetection's documentation! ======================================= .. toctree:: :maxdepth: 2 :caption: Get Started get_started.md modelzoo_statistics.md model_zoo.md .. toctree:: :maxdepth: 2 :caption: Quick Run 1_exist_data_model.md 2_new_data_model.md 3_exist_data_new_model.md .. toctree:: :maxdepth: 2 :caption: Tutorials tutorials/index.rst .. toctree:: :maxdepth: 2 :caption: Useful Tools and Scripts useful_tools.md .. toctree:: :maxdepth: 2 :caption: Notes conventions.md compatibility.md projects.md changelog.md faq.md .. toctree:: :caption: Switch Language switch_language.md .. toctree:: :maxdepth: 1 :caption: API Reference api.rst .. toctree:: :maxdepth: 1 :caption: Device Support device/npu.md Indices and tables ================== * :ref:`genindex` * :ref:`search` ================================================ FILE: DLTA_AI_app/mmdetection/docs/en/make.bat ================================================ @ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=. set BUILDDIR=_build if "%1" == "" goto help %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found.
Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% :end popd ================================================ FILE: DLTA_AI_app/mmdetection/docs/en/stat.py ================================================ #!/usr/bin/env python import functools as func import glob import os.path as osp import re import numpy as np url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/master/configs' files = sorted(glob.glob('../../configs/*/README.md')) stats = [] titles = [] num_ckpts = 0 for f in files: url = osp.dirname(f.replace('../../configs', url_prefix)) with open(f, 'r') as content_file: content = content_file.read() title = content.split('\n')[0].replace('# ', '').strip() ckpts = set(x.lower().strip() for x in re.findall(r'\[model\]\((https?.*)\)', content)) if len(ckpts) == 0: continue _papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)] assert len(_papertype) > 0 papertype = _papertype[0] paper = set([(papertype, title)]) titles.append(title) num_ckpts += len(ckpts) statsmsg = f""" \t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts) """ stats.append((paper, ckpts, statsmsg)) allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats]) msglist = '\n'.join(x for _, _, x in stats) papertypes, papercounts = np.unique([t for t, _ in allpapers], return_counts=True) countstr = '\n'.join( [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) modelzoo = f""" # Model Zoo Statistics * Number of papers: {len(set(titles))} {countstr} * Number of checkpoints: {num_ckpts} {msglist} """ with open('modelzoo_statistics.md', 'w') as f: f.write(modelzoo) ================================================ FILE: DLTA_AI_app/mmdetection/docs/en/tutorials/index.rst ================================================ .. toctree:: :maxdepth: 2 config.md customize_dataset.md data_pipeline.md customize_models.md customize_runtime.md customize_losses.md finetune.md robustness_benchmarking.md pytorch2onnx.md onnx2tensorrt.md init_cfg.md how_to.md test_results_submission.md useful_hooks.md ================================================ FILE: DLTA_AI_app/mmdetection/docs/zh_cn/Makefile ================================================ # Minimal makefile for Sphinx documentation # # You can set these variables from the command line, and also # from the environment for the first two. SPHINXOPTS ?= SPHINXBUILD ?= sphinx-build SOURCEDIR = . BUILDDIR = _build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
%: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) ================================================ FILE: DLTA_AI_app/mmdetection/docs/zh_cn/_static/css/readthedocs.css ================================================ .header-logo { background-image: url("../image/mmdet-logo.png"); background-size: 156px 40px; height: 40px; width: 156px; } ================================================ FILE: DLTA_AI_app/mmdetection/docs/zh_cn/api.rst ================================================ mmdet.apis -------------- .. automodule:: mmdet.apis :members: mmdet.core -------------- anchor ^^^^^^^^^^ .. automodule:: mmdet.core.anchor :members: bbox ^^^^^^^^^^ .. automodule:: mmdet.core.bbox :members: export ^^^^^^^^^^ .. automodule:: mmdet.core.export :members: mask ^^^^^^^^^^ .. automodule:: mmdet.core.mask :members: evaluation ^^^^^^^^^^ .. automodule:: mmdet.core.evaluation :members: post_processing ^^^^^^^^^^^^^^^ .. automodule:: mmdet.core.post_processing :members: utils ^^^^^^^^^^ .. automodule:: mmdet.core.utils :members: mmdet.datasets -------------- datasets ^^^^^^^^^^ .. automodule:: mmdet.datasets :members: pipelines ^^^^^^^^^^ .. automodule:: mmdet.datasets.pipelines :members: samplers ^^^^^^^^^^ .. automodule:: mmdet.datasets.samplers :members: api_wrappers ^^^^^^^^^^ .. automodule:: mmdet.datasets.api_wrappers :members: mmdet.models -------------- detectors ^^^^^^^^^^ .. automodule:: mmdet.models.detectors :members: backbones ^^^^^^^^^^ .. automodule:: mmdet.models.backbones :members: necks ^^^^^^^^^^^^ .. automodule:: mmdet.models.necks :members: dense_heads ^^^^^^^^^^^^ .. automodule:: mmdet.models.dense_heads :members: roi_heads ^^^^^^^^^^ .. automodule:: mmdet.models.roi_heads :members: losses ^^^^^^^^^^ .. automodule:: mmdet.models.losses :members: utils ^^^^^^^^^^ .. automodule:: mmdet.models.utils :members: mmdet.utils -------------- .. automodule::mmdet.utils :members: ================================================ FILE: DLTA_AI_app/mmdetection/docs/zh_cn/conf.py ================================================ # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import subprocess import sys import pytorch_sphinx_theme sys.path.insert(0, os.path.abspath('../../')) # -- Project information ----------------------------------------------------- project = 'MMDetection' copyright = '2018-2021, OpenMMLab' author = 'MMDetection Authors' version_file = '../../mmdet/version.py' def get_version(): with open(version_file, 'r') as f: exec(compile(f.read(), version_file, 'exec')) return locals()['__version__'] # The full version, including alpha/beta/rc tags release = get_version() # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', 'myst_parser', 'sphinx_markdown_tables', 'sphinx_copybutton', ] myst_enable_extensions = ['colon_fence'] myst_heading_anchors = 3 autodoc_mock_imports = [ 'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = { '.rst': 'restructuredtext', '.md': 'markdown', } # The master toctree document. master_doc = 'index' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # # html_theme = 'sphinx_rtd_theme' html_theme = 'pytorch_sphinx_theme' html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] html_theme_options = { 'menu': [ { 'name': 'GitHub', 'url': 'https://github.com/open-mmlab/mmdetection' }, ], # Specify the language of shared menu 'menu_lang': 'cn', } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_css_files = ['css/readthedocs.css'] language = 'zh_CN' # -- Extension configuration ------------------------------------------------- # Ignore >>> when copying code copybutton_prompt_text = r'>>> |\.\.\. ' copybutton_prompt_is_regexp = True def builder_inited_handler(app): subprocess.run(['./stat.py']) def setup(app): app.connect('builder-inited', builder_inited_handler) ================================================ FILE: DLTA_AI_app/mmdetection/docs/zh_cn/index.rst ================================================ Welcome to MMDetection's documentation! ======================================= .. toctree:: :maxdepth: 2 :caption: 开始你的第一步 get_started.md model_zoo.md article.md .. toctree:: :maxdepth: 2 :caption: 快速启动 1_exist_data_model.md 2_new_data_model.md 3_exist_data_new_model.md .. toctree:: :maxdepth: 2 :caption: 教程 tutorials/index.rst .. toctree:: :maxdepth: 2 :caption: 实用工具与脚本 useful_tools.md .. toctree:: :maxdepth: 2 :caption: 说明 conventions.md compatibility.md faq.md .. toctree:: :caption: 语言切换 switch_language.md .. toctree:: :maxdepth: 1 :caption: 接口文档(英文) api.rst Indices and tables ================== * :ref:`genindex` * :ref:`search` ================================================ FILE: DLTA_AI_app/mmdetection/docs/zh_cn/make.bat ================================================ @ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=. set BUILDDIR=_build if "%1" == "" goto help %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. 
echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% :end popd ================================================ FILE: DLTA_AI_app/mmdetection/docs/zh_cn/stat.py ================================================ #!/usr/bin/env python import functools as func import glob import os.path as osp import re import numpy as np url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/master/' files = sorted(glob.glob('../configs/*/README.md')) stats = [] titles = [] num_ckpts = 0 for f in files: url = osp.dirname(f.replace('../', url_prefix)) with open(f, 'r') as content_file: content = content_file.read() title = content.split('\n')[0].replace('# ', '').strip() ckpts = set(x.lower().strip() for x in re.findall(r'\[model\]\((https?.*)\)', content)) if len(ckpts) == 0: continue _papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)] assert len(_papertype) > 0 papertype = _papertype[0] paper = set([(papertype, title)]) titles.append(title) num_ckpts += len(ckpts) statsmsg = f""" \t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts) """ stats.append((paper, ckpts, statsmsg)) allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats]) msglist = '\n'.join(x for _, _, x in stats) papertypes, papercounts = np.unique([t for t, _ in allpapers], return_counts=True) countstr = '\n'.join( [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) modelzoo = f""" # Model Zoo Statistics * Number of papers: {len(set(titles))} {countstr} * Number of checkpoints: {num_ckpts} {msglist} """ with open('modelzoo_statistics.md', 'w') as f: f.write(modelzoo) ================================================ FILE: DLTA_AI_app/mmdetection/docs/zh_cn/tutorials/index.rst ================================================ .. toctree:: :maxdepth: 2 config.md customize_dataset.md data_pipeline.md customize_models.md customize_runtime.md customize_losses.md finetune.md pytorch2onnx.md onnx2tensorrt.md init_cfg.md how_to.md ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import mmcv from .version import __version__, short_version def digit_version(version_str): digit_version = [] for x in version_str.split('.'): if x.isdigit(): digit_version.append(int(x)) elif x.find('rc') != -1: patch_version = x.split('rc') digit_version.append(int(patch_version[0]) - 1) digit_version.append(int(patch_version[1])) return digit_version mmcv_minimum_version = '1.3.17' mmcv_maximum_version = '1.8.0' mmcv_version = digit_version(mmcv.__version__) assert (mmcv_version >= digit_version(mmcv_minimum_version) and mmcv_version <= digit_version(mmcv_maximum_version)), \ f'MMCV=={mmcv.__version__} is used but incompatible. ' \ f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.' __all__ = ['__version__', 'short_version'] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/apis/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
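To make the `rc` handling in `digit_version` above concrete: a release-candidate segment maps to `(patch - 1, rc_number)`, so it compares below the corresponding final release under list comparison. A standalone check (the function body is copied verbatim from `mmdet/__init__.py` above; the asserts are illustrative):

```python
def digit_version(version_str):
    # Verbatim copy of digit_version() from mmdet/__init__.py above.
    digit_version = []
    for x in version_str.split('.'):
        if x.isdigit():
            digit_version.append(int(x))
        elif x.find('rc') != -1:
            patch_version = x.split('rc')
            digit_version.append(int(patch_version[0]) - 1)
            digit_version.append(int(patch_version[1]))
    return digit_version

assert digit_version('1.3.17') == [1, 3, 17]
assert digit_version('1.7.0rc1') == [1, 7, -1, 1]   # '0rc1' -> (-1, 1)
assert digit_version('1.7.0rc1') < digit_version('1.7.0')
```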
from .inference import (async_inference_detector, inference_detector, init_detector, show_result_pyplot) from .test import multi_gpu_test, single_gpu_test from .train import (get_root_logger, init_random_seed, set_random_seed, train_detector) __all__ = [ 'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector', 'async_inference_detector', 'inference_detector', 'show_result_pyplot', 'multi_gpu_test', 'single_gpu_test', 'init_random_seed' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/apis/inference.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings from pathlib import Path import mmcv import numpy as np import torch from mmcv.ops import RoIPool from mmcv.parallel import collate, scatter from mmcv.runner import load_checkpoint from mmdet.core import get_classes from mmdet.datasets import replace_ImageToTensor from mmdet.datasets.pipelines import Compose from mmdet.models import build_detector def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None): """Initialize a detector from config file. Args: config (str, :obj:`Path`, or :obj:`mmcv.Config`): Config file path, :obj:`Path`, or the config object. checkpoint (str, optional): Checkpoint path. If left as None, the model will not load any weights. cfg_options (dict): Options to override some settings in the used config. Returns: nn.Module: The constructed detector. """ if isinstance(config, (str, Path)): config = mmcv.Config.fromfile(config) elif not isinstance(config, mmcv.Config): raise TypeError('config must be a filename or Config object, ' f'but got {type(config)}') if cfg_options is not None: config.merge_from_dict(cfg_options) if 'pretrained' in config.model: config.model.pretrained = None elif 'init_cfg' in config.model.backbone: config.model.backbone.init_cfg = None config.model.train_cfg = None model = build_detector(config.model, test_cfg=config.get('test_cfg')) if checkpoint is not None: checkpoint = load_checkpoint(model, checkpoint, map_location='cpu') if 'CLASSES' in checkpoint.get('meta', {}): model.CLASSES = checkpoint['meta']['CLASSES'] else: warnings.simplefilter('once') warnings.warn('Class names are not saved in the checkpoint\'s ' 'meta data, use COCO classes by default.') model.CLASSES = get_classes('coco') model.cfg = config # save the config in the model for convenience model.to(device) model.eval() if device == 'npu': from mmcv.device.npu import NPUDataParallel model = NPUDataParallel(model) model.cfg = config return model class LoadImage: """Deprecated. A simple pipeline to load image. """ def __call__(self, results): """Call function to load images into results. Args: results (dict): A result dict contains the file name of the image to be read. Returns: dict: ``results`` will be returned containing loaded image. """ warnings.simplefilter('once') warnings.warn('`LoadImage` is deprecated and will be removed in ' 'future releases. You may use `LoadImageFromWebcam` ' 'from `mmdet.datasets.pipelines.` instead.') if isinstance(results['img'], str): results['filename'] = results['img'] results['ori_filename'] = results['img'] else: results['filename'] = None results['ori_filename'] = None img = mmcv.imread(results['img']) results['img'] = img results['img_fields'] = ['img'] results['img_shape'] = img.shape results['ori_shape'] = img.shape return results def inference_detector(model, imgs): """Inference image(s) with the detector. Args: model (nn.Module): The loaded detector. 
imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]): Either image files or loaded images. Returns: If imgs is a list or tuple, the same length list type results will be returned, otherwise return the detection results directly. """ if isinstance(imgs, (list, tuple)): is_batch = True else: imgs = [imgs] is_batch = False cfg = model.cfg device = next(model.parameters()).device # model device if isinstance(imgs[0], np.ndarray): cfg = cfg.copy() # set loading pipeline type cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) test_pipeline = Compose(cfg.data.test.pipeline) datas = [] for img in imgs: # prepare data if isinstance(img, np.ndarray): # directly add img data = dict(img=img) else: # add information into dict data = dict(img_info=dict(filename=img), img_prefix=None) # build the data pipeline data = test_pipeline(data) datas.append(data) data = collate(datas, samples_per_gpu=len(imgs)) # just get the actual data from DataContainer data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']] data['img'] = [img.data[0] for img in data['img']] if next(model.parameters()).is_cuda: # scatter to specified GPU data = scatter(data, [device])[0] else: for m in model.modules(): assert not isinstance( m, RoIPool ), 'CPU inference with RoIPool is not supported currently.' # forward the model with torch.no_grad(): results = model(return_loss=False, rescale=True, **data) if not is_batch: return results[0] else: return results async def async_inference_detector(model, imgs): """Async inference image(s) with the detector. Args: model (nn.Module): The loaded detector. img (str | ndarray): Either image files or loaded images. Returns: Awaitable detection results. """ if not isinstance(imgs, (list, tuple)): imgs = [imgs] cfg = model.cfg device = next(model.parameters()).device # model device if isinstance(imgs[0], np.ndarray): cfg = cfg.copy() # set loading pipeline type cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) test_pipeline = Compose(cfg.data.test.pipeline) datas = [] for img in imgs: # prepare data if isinstance(img, np.ndarray): # directly add img data = dict(img=img) else: # add information into dict data = dict(img_info=dict(filename=img), img_prefix=None) # build the data pipeline data = test_pipeline(data) datas.append(data) data = collate(datas, samples_per_gpu=len(imgs)) # just get the actual data from DataContainer data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']] data['img'] = [img.data[0] for img in data['img']] if next(model.parameters()).is_cuda: # scatter to specified GPU data = scatter(data, [device])[0] else: for m in model.modules(): assert not isinstance( m, RoIPool ), 'CPU inference with RoIPool is not supported currently.' # We don't restore `torch.is_grad_enabled()` value during concurrent # inference since execution can overlap torch.set_grad_enabled(False) results = await model.aforward_test(rescale=True, **data) return results def show_result_pyplot(model, img, result, score_thr=0.3, title='result', wait_time=0, palette=None, out_file=None): """Visualize the detection results on the image. Args: model (nn.Module): The loaded detector. img (str or np.ndarray): Image filename or loaded image. result (tuple[list] or list): The detection result, can be either (bbox, segm) or just bbox. score_thr (float): The threshold to visualize the bboxes and masks. 
title (str): Title of the pyplot figure. wait_time (float): Value of waitKey param. Default: 0. palette (str or tuple(int) or :obj:`Color`): Color. The tuple of color should be in BGR order. out_file (str or None): The path to write the image. Default: None. """ if hasattr(model, 'module'): model = model.module model.show_result( img, result, score_thr=score_thr, show=True, wait_time=wait_time, win_name=title, bbox_color=palette, text_color=(200, 200, 200), mask_color=palette, out_file=out_file) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/apis/test.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import pickle import shutil import tempfile import time import mmcv import torch import torch.distributed as dist from mmcv.image import tensor2imgs from mmcv.runner import get_dist_info from mmdet.core import encode_mask_results def single_gpu_test(model, data_loader, show=False, out_dir=None, show_score_thr=0.3): model.eval() results = [] dataset = data_loader.dataset PALETTE = getattr(dataset, 'PALETTE', None) prog_bar = mmcv.ProgressBar(len(dataset)) for i, data in enumerate(data_loader): with torch.no_grad(): result = model(return_loss=False, rescale=True, **data) batch_size = len(result) if show or out_dir: if batch_size == 1 and isinstance(data['img'][0], torch.Tensor): img_tensor = data['img'][0] else: img_tensor = data['img'][0].data[0] img_metas = data['img_metas'][0].data[0] imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg']) assert len(imgs) == len(img_metas) for i, (img, img_meta) in enumerate(zip(imgs, img_metas)): h, w, _ = img_meta['img_shape'] img_show = img[:h, :w, :] ori_h, ori_w = img_meta['ori_shape'][:-1] img_show = mmcv.imresize(img_show, (ori_w, ori_h)) if out_dir: out_file = osp.join(out_dir, img_meta['ori_filename']) else: out_file = None model.module.show_result( img_show, result[i], bbox_color=PALETTE, text_color=PALETTE, mask_color=PALETTE, show=show, out_file=out_file, score_thr=show_score_thr) # encode mask results if isinstance(result[0], tuple): result = [(bbox_results, encode_mask_results(mask_results)) for bbox_results, mask_results in result] # This logic is only used in panoptic segmentation test. elif isinstance(result[0], dict) and 'ins_results' in result[0]: for j in range(len(result)): bbox_results, mask_results = result[j]['ins_results'] result[j]['ins_results'] = (bbox_results, encode_mask_results(mask_results)) results.extend(result) for _ in range(batch_size): prog_bar.update() return results def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): """Test model with multiple gpus. This method tests model with multiple gpus and collects the results under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' it encodes results to gpu tensors and use gpu communication for results collection. On cpu mode it saves the results on different gpus to 'tmpdir' and collects them by the rank 0 worker. Args: model (nn.Module): Model to be tested. data_loader (nn.Dataloader): Pytorch data loader. tmpdir (str): Path of directory to save the temporary results from different gpus under cpu mode. gpu_collect (bool): Option to use either gpu or cpu to collect results. Returns: list: The prediction results. 
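Taken together, `init_detector`, `inference_detector` and `show_result_pyplot` from `mmdet/apis/inference.py` above form the usual three-call workflow. A sketch with placeholder paths (substitute a real config, checkpoint and image):

```python
from mmdet.apis import init_detector, inference_detector, show_result_pyplot

config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'   # placeholder
checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco.pth'      # placeholder

model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo.jpg')   # one image in -> one result out
show_result_pyplot(model, 'demo.jpg', result, score_thr=0.3)
```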
""" model.eval() results = [] dataset = data_loader.dataset rank, world_size = get_dist_info() if rank == 0: prog_bar = mmcv.ProgressBar(len(dataset)) time.sleep(2) # This line can prevent deadlock problem in some cases. for i, data in enumerate(data_loader): with torch.no_grad(): result = model(return_loss=False, rescale=True, **data) # encode mask results if isinstance(result[0], tuple): result = [(bbox_results, encode_mask_results(mask_results)) for bbox_results, mask_results in result] # This logic is only used in panoptic segmentation test. elif isinstance(result[0], dict) and 'ins_results' in result[0]: for j in range(len(result)): bbox_results, mask_results = result[j]['ins_results'] result[j]['ins_results'] = ( bbox_results, encode_mask_results(mask_results)) results.extend(result) if rank == 0: batch_size = len(result) for _ in range(batch_size * world_size): prog_bar.update() # collect results from all ranks if gpu_collect: results = collect_results_gpu(results, len(dataset)) else: results = collect_results_cpu(results, len(dataset), tmpdir) return results def collect_results_cpu(result_part, size, tmpdir=None): rank, world_size = get_dist_info() # create a tmp dir if it is not specified if tmpdir is None: MAX_LEN = 512 # 32 is whitespace dir_tensor = torch.full((MAX_LEN, ), 32, dtype=torch.uint8, device='cuda') if rank == 0: mmcv.mkdir_or_exist('.dist_test') tmpdir = tempfile.mkdtemp(dir='.dist_test') tmpdir = torch.tensor( bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') dir_tensor[:len(tmpdir)] = tmpdir dist.broadcast(dir_tensor, 0) tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() else: mmcv.mkdir_or_exist(tmpdir) # dump the part result to the dir mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl')) dist.barrier() # collect all parts if rank != 0: return None else: # load results of all parts from tmp dir part_list = [] for i in range(world_size): part_file = osp.join(tmpdir, f'part_{i}.pkl') part_list.append(mmcv.load(part_file)) # sort the results ordered_results = [] for res in zip(*part_list): ordered_results.extend(list(res)) # the dataloader may pad some samples ordered_results = ordered_results[:size] # remove tmp dir shutil.rmtree(tmpdir) return ordered_results def collect_results_gpu(result_part, size): rank, world_size = get_dist_info() # dump result part to tensor with pickle part_tensor = torch.tensor( bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda') # gather all result part tensor shape shape_tensor = torch.tensor(part_tensor.shape, device='cuda') shape_list = [shape_tensor.clone() for _ in range(world_size)] dist.all_gather(shape_list, shape_tensor) # padding result part tensor to max length shape_max = torch.tensor(shape_list).max() part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda') part_send[:shape_tensor[0]] = part_tensor part_recv_list = [ part_tensor.new_zeros(shape_max) for _ in range(world_size) ] # gather all result part dist.all_gather(part_recv_list, part_send) if rank == 0: part_list = [] for recv, shape in zip(part_recv_list, shape_list): part_list.append( pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())) # sort the results ordered_results = [] for res in zip(*part_list): ordered_results.extend(list(res)) # the dataloader may pad some samples ordered_results = ordered_results[:size] return ordered_results ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/apis/train.py ================================================ # Copyright (c) 
OpenMMLab. All rights reserved. import os import random import numpy as np import torch import torch.distributed as dist from mmcv.runner import (DistSamplerSeedHook, EpochBasedRunner, Fp16OptimizerHook, OptimizerHook, build_runner, get_dist_info) from mmdet.core import DistEvalHook, EvalHook, build_optimizer from mmdet.datasets import (build_dataloader, build_dataset, replace_ImageToTensor) from mmdet.utils import (build_ddp, build_dp, compat_cfg, find_latest_checkpoint, get_root_logger) def init_random_seed(seed=None, device='cuda'): """Initialize random seed. If the seed is not set, the seed will be automatically randomized, and then broadcast to all processes to prevent some potential bugs. Args: seed (int, Optional): The seed. Default to None. device (str): The device where the seed will be put on. Default to 'cuda'. Returns: int: Seed to be used. """ if seed is not None: return seed # Make sure all ranks share the same random seed to prevent # some potential bugs. Please refer to # https://github.com/open-mmlab/mmdetection/issues/6339 rank, world_size = get_dist_info() seed = np.random.randint(2**31) if world_size == 1: return seed if rank == 0: random_num = torch.tensor(seed, dtype=torch.int32, device=device) else: random_num = torch.tensor(0, dtype=torch.int32, device=device) dist.broadcast(random_num, src=0) return random_num.item() def set_random_seed(seed, deterministic=False): """Set random seed. Args: seed (int): Seed to be used. deterministic (bool): Whether to set the deterministic option for CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` to True and `torch.backends.cudnn.benchmark` to False. Default: False. """ random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) if deterministic: torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False def auto_scale_lr(cfg, distributed, logger): """Automatically scaling LR according to GPU number and sample per GPU. Args: cfg (config): Training config. distributed (bool): Using distributed or not. logger (logging.Logger): Logger. """ # Get flag from config if ('auto_scale_lr' not in cfg) or \ (not cfg.auto_scale_lr.get('enable', False)): logger.info('Automatic scaling of learning rate (LR)' ' has been disabled.') return # Get base batch size from config base_batch_size = cfg.auto_scale_lr.get('base_batch_size', None) if base_batch_size is None: return # Get gpu number if distributed: _, world_size = get_dist_info() num_gpus = len(range(world_size)) else: num_gpus = len(cfg.gpu_ids) # calculate the batch size samples_per_gpu = cfg.data.train_dataloader.samples_per_gpu batch_size = num_gpus * samples_per_gpu logger.info(f'Training with {num_gpus} GPU(s) with {samples_per_gpu} ' f'samples per GPU. 
The total batch size is {batch_size}.') if batch_size != base_batch_size: # scale LR with # [linear scaling rule](https://arxiv.org/abs/1706.02677) scaled_lr = (batch_size / base_batch_size) * cfg.optimizer.lr logger.info('LR has been automatically scaled ' f'from {cfg.optimizer.lr} to {scaled_lr}') cfg.optimizer.lr = scaled_lr else: logger.info('The batch size match the ' f'base batch size: {base_batch_size}, ' f'will not scaling the LR ({cfg.optimizer.lr}).') def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None): cfg = compat_cfg(cfg) logger = get_root_logger(log_level=cfg.log_level) # prepare data loaders dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] runner_type = 'EpochBasedRunner' if 'runner' not in cfg else cfg.runner[ 'type'] train_dataloader_default_args = dict( samples_per_gpu=2, workers_per_gpu=2, # `num_gpus` will be ignored if distributed num_gpus=len(cfg.gpu_ids), dist=distributed, seed=cfg.seed, runner_type=runner_type, persistent_workers=False) train_loader_cfg = { **train_dataloader_default_args, **cfg.data.get('train_dataloader', {}) } data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset] # put model on gpus if distributed: find_unused_parameters = cfg.get('find_unused_parameters', False) # Sets the `find_unused_parameters` parameter in # torch.nn.parallel.DistributedDataParallel model = build_ddp( model, cfg.device, device_ids=[int(os.environ['LOCAL_RANK'])], broadcast_buffers=False, find_unused_parameters=find_unused_parameters) else: model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids) # build optimizer auto_scale_lr(cfg, distributed, logger) optimizer = build_optimizer(model, cfg.optimizer) runner = build_runner( cfg.runner, default_args=dict( model=model, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta)) # an ugly workaround to make .log and .log.json filenames the same runner.timestamp = timestamp # fp16 setting fp16_cfg = cfg.get('fp16', None) if fp16_cfg is None and cfg.get('device', None) == 'npu': fp16_cfg = dict(loss_scale='dynamic') if fp16_cfg is not None: optimizer_config = Fp16OptimizerHook( **cfg.optimizer_config, **fp16_cfg, distributed=distributed) elif distributed and 'type' not in cfg.optimizer_config: optimizer_config = OptimizerHook(**cfg.optimizer_config) else: optimizer_config = cfg.optimizer_config # register hooks runner.register_training_hooks( cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config, cfg.get('momentum_config', None), custom_hooks_config=cfg.get('custom_hooks', None)) if distributed: if isinstance(runner, EpochBasedRunner): runner.register_hook(DistSamplerSeedHook()) # register eval hooks if validate: val_dataloader_default_args = dict( samples_per_gpu=1, workers_per_gpu=2, dist=distributed, shuffle=False, persistent_workers=False) val_dataloader_args = { **val_dataloader_default_args, **cfg.data.get('val_dataloader', {}) } # Support batch_size > 1 in validation if val_dataloader_args['samples_per_gpu'] > 1: # Replace 'ImageToTensor' to 'DefaultFormatBundle' cfg.data.val.pipeline = replace_ImageToTensor( cfg.data.val.pipeline) val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) val_dataloader = build_dataloader(val_dataset, **val_dataloader_args) eval_cfg = cfg.get('evaluation', {}) eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner' eval_hook = DistEvalHook if distributed else EvalHook # In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the # priority of 
IterTimerHook has been modified from 'NORMAL' to 'LOW'. runner.register_hook( eval_hook(val_dataloader, **eval_cfg), priority='LOW') resume_from = None if cfg.resume_from is None and cfg.get('auto_resume'): resume_from = find_latest_checkpoint(cfg.work_dir) if resume_from is not None: cfg.resume_from = resume_from if cfg.resume_from: runner.resume(cfg.resume_from) elif cfg.load_from: runner.load_checkpoint(cfg.load_from) runner.run(data_loaders, cfg.workflow) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .anchor import * # noqa: F401, F403 from .bbox import * # noqa: F401, F403 from .data_structures import * # noqa: F401, F403 from .evaluation import * # noqa: F401, F403 from .hook import * # noqa: F401, F403 from .mask import * # noqa: F401, F403 from .optimizers import * # noqa: F401, F403 from .post_processing import * # noqa: F401, F403 from .utils import * # noqa: F401, F403 ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/anchor/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator, YOLOAnchorGenerator) from .builder import (ANCHOR_GENERATORS, PRIOR_GENERATORS, build_anchor_generator, build_prior_generator) from .point_generator import MlvlPointGenerator, PointGenerator from .utils import anchor_inside_flags, calc_region, images_to_levels __all__ = [ 'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags', 'PointGenerator', 'images_to_levels', 'calc_region', 'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator', 'build_prior_generator', 'PRIOR_GENERATORS', 'MlvlPointGenerator' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/anchor/anchor_generator.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings import mmcv import numpy as np import torch from torch.nn.modules.utils import _pair from .builder import PRIOR_GENERATORS @PRIOR_GENERATORS.register_module() class AnchorGenerator: """Standard anchor generator for 2D anchor-based detectors. Args: strides (list[int] | list[tuple[int, int]]): Strides of anchors in multiple feature levels in order (w, h). ratios (list[float]): The list of ratios between the height and width of anchors in a single level. scales (list[int] | None): Anchor scales for anchors in a single level. It cannot be set at the same time if `octave_base_scale` and `scales_per_octave` are set. base_sizes (list[int] | None): The basic sizes of anchors in multiple levels. If None is given, strides will be used as base_sizes. (If strides are non square, the shortest stride is taken.) scale_major (bool): Whether to multiply scales first when generating base anchors. If true, the anchors in the same row will have the same scales. By default it is True in V2.0 octave_base_scale (int): The base scale of octave. scales_per_octave (int): Number of scales for each octave. `octave_base_scale` and `scales_per_octave` are usually used in retinanet and the `scales` should be None when they are set. centers (list[tuple[float, float]] | None): The centers of the anchor relative to the feature grid center in multiple feature levels. By default it is set to be None and not used. 
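(Stepping back to `auto_scale_lr` in train.py above: the linear scaling rule deserves one worked number. A sketch, assuming the common defaults of `base_batch_size=16` and `lr=0.02`; both numbers are assumptions for illustration:)

```python
# Linear scaling rule (https://arxiv.org/abs/1706.02677) as applied in
# auto_scale_lr(); the base numbers below are assumed, not from the config.
base_batch_size, base_lr = 16, 0.02      # e.g. 8 GPUs x 2 samples per GPU
num_gpus, samples_per_gpu = 4, 2
batch_size = num_gpus * samples_per_gpu  # 8
scaled_lr = (batch_size / base_batch_size) * base_lr
assert scaled_lr == 0.01                 # half the batch -> half the LR
```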
If a list of tuple of float is given, they will be used to shift the centers of anchors. center_offset (float): The offset of center in proportion to anchors' width and height. By default it is 0 in V2.0. Examples: >>> from mmdet.core import AnchorGenerator >>> self = AnchorGenerator([16], [1.], [1.], [9]) >>> all_anchors = self.grid_priors([(2, 2)], device='cpu') >>> print(all_anchors) [tensor([[-4.5000, -4.5000, 4.5000, 4.5000], [11.5000, -4.5000, 20.5000, 4.5000], [-4.5000, 11.5000, 4.5000, 20.5000], [11.5000, 11.5000, 20.5000, 20.5000]])] >>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18]) >>> all_anchors = self.grid_priors([(2, 2), (1, 1)], device='cpu') >>> print(all_anchors) [tensor([[-4.5000, -4.5000, 4.5000, 4.5000], [11.5000, -4.5000, 20.5000, 4.5000], [-4.5000, 11.5000, 4.5000, 20.5000], [11.5000, 11.5000, 20.5000, 20.5000]]), \ tensor([[-9., -9., 9., 9.]])] """ def __init__(self, strides, ratios, scales=None, base_sizes=None, scale_major=True, octave_base_scale=None, scales_per_octave=None, centers=None, center_offset=0.): # check center and center_offset if center_offset != 0: assert centers is None, 'center cannot be set when center_offset' \ f'!=0, {centers} is given.' if not (0 <= center_offset <= 1): raise ValueError('center_offset should be in range [0, 1], ' f'{center_offset} is given.') if centers is not None: assert len(centers) == len(strides), \ 'The number of strides should be the same as centers, got ' \ f'{strides} and {centers}' # calculate base sizes of anchors self.strides = [_pair(stride) for stride in strides] self.base_sizes = [min(stride) for stride in self.strides ] if base_sizes is None else base_sizes assert len(self.base_sizes) == len(self.strides), \ 'The number of strides should be the same as base sizes, got ' \ f'{self.strides} and {self.base_sizes}' # calculate scales of anchors assert ((octave_base_scale is not None and scales_per_octave is not None) ^ (scales is not None)), \ 'scales and octave_base_scale with scales_per_octave cannot' \ ' be set at the same time' if scales is not None: self.scales = torch.Tensor(scales) elif octave_base_scale is not None and scales_per_octave is not None: octave_scales = np.array( [2**(i / scales_per_octave) for i in range(scales_per_octave)]) scales = octave_scales * octave_base_scale self.scales = torch.Tensor(scales) else: raise ValueError('Either scales or octave_base_scale with ' 'scales_per_octave should be set') self.octave_base_scale = octave_base_scale self.scales_per_octave = scales_per_octave self.ratios = torch.Tensor(ratios) self.scale_major = scale_major self.centers = centers self.center_offset = center_offset self.base_anchors = self.gen_base_anchors() @property def num_base_anchors(self): """list[int]: total number of base anchors in a feature grid""" return self.num_base_priors @property def num_base_priors(self): """list[int]: The number of priors (anchors) at a point on the feature grid""" return [base_anchors.size(0) for base_anchors in self.base_anchors] @property def num_levels(self): """int: number of feature levels that the generator will be applied""" return len(self.strides) def gen_base_anchors(self): """Generate base anchors. Returns: list(torch.Tensor): Base anchors of a feature grid in multiple \ feature levels. 
""" multi_level_base_anchors = [] for i, base_size in enumerate(self.base_sizes): center = None if self.centers is not None: center = self.centers[i] multi_level_base_anchors.append( self.gen_single_level_base_anchors( base_size, scales=self.scales, ratios=self.ratios, center=center)) return multi_level_base_anchors def gen_single_level_base_anchors(self, base_size, scales, ratios, center=None): """Generate base anchors of a single level. Args: base_size (int | float): Basic size of an anchor. scales (torch.Tensor): Scales of the anchor. ratios (torch.Tensor): The ratio between between the height and width of anchors in a single level. center (tuple[float], optional): The center of the base anchor related to a single feature grid. Defaults to None. Returns: torch.Tensor: Anchors in a single-level feature maps. """ w = base_size h = base_size if center is None: x_center = self.center_offset * w y_center = self.center_offset * h else: x_center, y_center = center h_ratios = torch.sqrt(ratios) w_ratios = 1 / h_ratios if self.scale_major: ws = (w * w_ratios[:, None] * scales[None, :]).view(-1) hs = (h * h_ratios[:, None] * scales[None, :]).view(-1) else: ws = (w * scales[:, None] * w_ratios[None, :]).view(-1) hs = (h * scales[:, None] * h_ratios[None, :]).view(-1) # use float anchor and the anchor's center is aligned with the # pixel center base_anchors = [ x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws, y_center + 0.5 * hs ] base_anchors = torch.stack(base_anchors, dim=-1) return base_anchors def _meshgrid(self, x, y, row_major=True): """Generate mesh grid of x and y. Args: x (torch.Tensor): Grids of x dimension. y (torch.Tensor): Grids of y dimension. row_major (bool, optional): Whether to return y grids first. Defaults to True. Returns: tuple[torch.Tensor]: The mesh grids of x and y. """ # use shape instead of len to keep tracing while exporting to onnx xx = x.repeat(y.shape[0]) yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1) if row_major: return xx, yy else: return yy, xx def grid_priors(self, featmap_sizes, dtype=torch.float32, device='cuda'): """Generate grid anchors in multiple feature levels. Args: featmap_sizes (list[tuple]): List of feature map sizes in multiple feature levels. dtype (:obj:`torch.dtype`): Dtype of priors. Default: torch.float32. device (str): The device where the anchors will be put on. Return: list[torch.Tensor]: Anchors in multiple feature levels. \ The sizes of each tensor should be [N, 4], where \ N = width * height * num_base_anchors, width and height \ are the sizes of the corresponding feature level, \ num_base_anchors is the number of anchors for that level. """ assert self.num_levels == len(featmap_sizes) multi_level_anchors = [] for i in range(self.num_levels): anchors = self.single_level_grid_priors( featmap_sizes[i], level_idx=i, dtype=dtype, device=device) multi_level_anchors.append(anchors) return multi_level_anchors def single_level_grid_priors(self, featmap_size, level_idx, dtype=torch.float32, device='cuda'): """Generate grid anchors of a single level. Note: This function is usually called by method ``self.grid_priors``. Args: featmap_size (tuple[int]): Size of the feature maps. level_idx (int): The index of corresponding feature map level. dtype (obj:`torch.dtype`): Date type of points.Defaults to ``torch.float32``. device (str, optional): The device the tensor will be put on. Defaults to 'cuda'. Returns: torch.Tensor: Anchors in the overall feature maps. 
""" base_anchors = self.base_anchors[level_idx].to(device).to(dtype) feat_h, feat_w = featmap_size stride_w, stride_h = self.strides[level_idx] # First create Range with the default dtype, than convert to # target `dtype` for onnx exporting. shift_x = torch.arange(0, feat_w, device=device).to(dtype) * stride_w shift_y = torch.arange(0, feat_h, device=device).to(dtype) * stride_h shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1) # first feat_w elements correspond to the first row of shifts # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get # shifted anchors (K, A, 4), reshape to (K*A, 4) all_anchors = base_anchors[None, :, :] + shifts[:, None, :] all_anchors = all_anchors.view(-1, 4) # first A rows correspond to A anchors of (0, 0) in feature map, # then (0, 1), (0, 2), ... return all_anchors def sparse_priors(self, prior_idxs, featmap_size, level_idx, dtype=torch.float32, device='cuda'): """Generate sparse anchors according to the ``prior_idxs``. Args: prior_idxs (Tensor): The index of corresponding anchors in the feature map. featmap_size (tuple[int]): feature map size arrange as (h, w). level_idx (int): The level index of corresponding feature map. dtype (obj:`torch.dtype`): Date type of points.Defaults to ``torch.float32``. device (obj:`torch.device`): The device where the points is located. Returns: Tensor: Anchor with shape (N, 4), N should be equal to the length of ``prior_idxs``. """ height, width = featmap_size num_base_anchors = self.num_base_anchors[level_idx] base_anchor_id = prior_idxs % num_base_anchors x = (prior_idxs // num_base_anchors) % width * self.strides[level_idx][0] y = (prior_idxs // width // num_base_anchors) % height * self.strides[level_idx][1] priors = torch.stack([x, y, x, y], 1).to(dtype).to(device) + \ self.base_anchors[level_idx][base_anchor_id, :].to(device) return priors def grid_anchors(self, featmap_sizes, device='cuda'): """Generate grid anchors in multiple feature levels. Args: featmap_sizes (list[tuple]): List of feature map sizes in multiple feature levels. device (str): Device where the anchors will be put on. Return: list[torch.Tensor]: Anchors in multiple feature levels. \ The sizes of each tensor should be [N, 4], where \ N = width * height * num_base_anchors, width and height \ are the sizes of the corresponding feature level, \ num_base_anchors is the number of anchors for that level. """ warnings.warn('``grid_anchors`` would be deprecated soon. ' 'Please use ``grid_priors`` ') assert self.num_levels == len(featmap_sizes) multi_level_anchors = [] for i in range(self.num_levels): anchors = self.single_level_grid_anchors( self.base_anchors[i].to(device), featmap_sizes[i], self.strides[i], device=device) multi_level_anchors.append(anchors) return multi_level_anchors def single_level_grid_anchors(self, base_anchors, featmap_size, stride=(16, 16), device='cuda'): """Generate grid anchors of a single level. Note: This function is usually called by method ``self.grid_anchors``. Args: base_anchors (torch.Tensor): The base anchors of a feature grid. featmap_size (tuple[int]): Size of the feature maps. stride (tuple[int], optional): Stride of the feature map in order (w, h). Defaults to (16, 16). device (str, optional): Device the tensor will be put on. Defaults to 'cuda'. Returns: torch.Tensor: Anchors in the overall feature maps. """ warnings.warn( '``single_level_grid_anchors`` would be deprecated soon. 
' 'Please use ``single_level_grid_priors`` ') # keep featmap_size as Tensor instead of int, so that we # can convert to ONNX correctly feat_h, feat_w = featmap_size shift_x = torch.arange(0, feat_w, device=device) * stride[0] shift_y = torch.arange(0, feat_h, device=device) * stride[1] shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1) shifts = shifts.type_as(base_anchors) # first feat_w elements correspond to the first row of shifts # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get # shifted anchors (K, A, 4), reshape to (K*A, 4) all_anchors = base_anchors[None, :, :] + shifts[:, None, :] all_anchors = all_anchors.view(-1, 4) # first A rows correspond to A anchors of (0, 0) in feature map, # then (0, 1), (0, 2), ... return all_anchors def valid_flags(self, featmap_sizes, pad_shape, device='cuda'): """Generate valid flags of anchors in multiple feature levels. Args: featmap_sizes (list(tuple)): List of feature map sizes in multiple feature levels. pad_shape (tuple): The padded shape of the image. device (str): Device where the anchors will be put on. Return: list(torch.Tensor): Valid flags of anchors in multiple levels. """ assert self.num_levels == len(featmap_sizes) multi_level_flags = [] for i in range(self.num_levels): anchor_stride = self.strides[i] feat_h, feat_w = featmap_sizes[i] h, w = pad_shape[:2] valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h) valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w) flags = self.single_level_valid_flags((feat_h, feat_w), (valid_feat_h, valid_feat_w), self.num_base_anchors[i], device=device) multi_level_flags.append(flags) return multi_level_flags def single_level_valid_flags(self, featmap_size, valid_size, num_base_anchors, device='cuda'): """Generate the valid flags of anchor in a single feature map. Args: featmap_size (tuple[int]): The size of feature maps, arrange as (h, w). valid_size (tuple[int]): The valid size of the feature maps. num_base_anchors (int): The number of base anchors. device (str, optional): Device where the flags will be put on. Defaults to 'cuda'. Returns: torch.Tensor: The valid flags of each anchor in a single level \ feature map. 
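`single_level_valid_flags` above marks which grid cells fall inside the unpadded image. Worked by hand for a 3x3 feature map whose top-left 2x2 block is valid:

```python
import torch

feat_w = feat_h = 3
valid_w = valid_h = 2
valid_x = torch.zeros(feat_w, dtype=torch.bool)
valid_y = torch.zeros(feat_h, dtype=torch.bool)
valid_x[:valid_w] = True
valid_y[:valid_h] = True
# _meshgrid: x repeats per row, y repeats per column (row-major layout).
xx = valid_x.repeat(feat_h)
yy = valid_y.view(-1, 1).repeat(1, feat_w).view(-1)
valid = xx & yy
assert valid.tolist() == [True, True, False,
                          True, True, False,
                          False, False, False]
```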
""" feat_h, feat_w = featmap_size valid_h, valid_w = valid_size assert valid_h <= feat_h and valid_w <= feat_w valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) valid_x[:valid_w] = 1 valid_y[:valid_h] = 1 valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) valid = valid_xx & valid_yy valid = valid[:, None].expand(valid.size(0), num_base_anchors).contiguous().view(-1) return valid def __repr__(self): """str: a string that describes the module""" indent_str = ' ' repr_str = self.__class__.__name__ + '(\n' repr_str += f'{indent_str}strides={self.strides},\n' repr_str += f'{indent_str}ratios={self.ratios},\n' repr_str += f'{indent_str}scales={self.scales},\n' repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' repr_str += f'{indent_str}scale_major={self.scale_major},\n' repr_str += f'{indent_str}octave_base_scale=' repr_str += f'{self.octave_base_scale},\n' repr_str += f'{indent_str}scales_per_octave=' repr_str += f'{self.scales_per_octave},\n' repr_str += f'{indent_str}num_levels={self.num_levels}\n' repr_str += f'{indent_str}centers={self.centers},\n' repr_str += f'{indent_str}center_offset={self.center_offset})' return repr_str @PRIOR_GENERATORS.register_module() class SSDAnchorGenerator(AnchorGenerator): """Anchor generator for SSD. Args: strides (list[int] | list[tuple[int, int]]): Strides of anchors in multiple feature levels. ratios (list[float]): The list of ratios between the height and width of anchors in a single level. min_sizes (list[float]): The list of minimum anchor sizes on each level. max_sizes (list[float]): The list of maximum anchor sizes on each level. basesize_ratio_range (tuple(float)): Ratio range of anchors. Being used when not setting min_sizes and max_sizes. input_size (int): Size of feature map, 300 for SSD300, 512 for SSD512. Being used when not setting min_sizes and max_sizes. scale_major (bool): Whether to multiply scales first when generating base anchors. If true, the anchors in the same row will have the same scales. It is always set to be False in SSD. """ def __init__(self, strides, ratios, min_sizes=None, max_sizes=None, basesize_ratio_range=(0.15, 0.9), input_size=300, scale_major=True): assert len(strides) == len(ratios) assert not (min_sizes is None) ^ (max_sizes is None) self.strides = [_pair(stride) for stride in strides] self.centers = [(stride[0] / 2., stride[1] / 2.) 
for stride in self.strides] if min_sizes is None and max_sizes is None: # use hard code to generate SSD anchors self.input_size = input_size assert mmcv.is_tuple_of(basesize_ratio_range, float) self.basesize_ratio_range = basesize_ratio_range # calculate anchor ratios and sizes min_ratio, max_ratio = basesize_ratio_range min_ratio = int(min_ratio * 100) max_ratio = int(max_ratio * 100) step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2)) min_sizes = [] max_sizes = [] for ratio in range(int(min_ratio), int(max_ratio) + 1, step): min_sizes.append(int(self.input_size * ratio / 100)) max_sizes.append(int(self.input_size * (ratio + step) / 100)) if self.input_size == 300: if basesize_ratio_range[0] == 0.15: # SSD300 COCO min_sizes.insert(0, int(self.input_size * 7 / 100)) max_sizes.insert(0, int(self.input_size * 15 / 100)) elif basesize_ratio_range[0] == 0.2: # SSD300 VOC min_sizes.insert(0, int(self.input_size * 10 / 100)) max_sizes.insert(0, int(self.input_size * 20 / 100)) else: raise ValueError( 'basesize_ratio_range[0] should be either 0.15' 'or 0.2 when input_size is 300, got ' f'{basesize_ratio_range[0]}.') elif self.input_size == 512: if basesize_ratio_range[0] == 0.1: # SSD512 COCO min_sizes.insert(0, int(self.input_size * 4 / 100)) max_sizes.insert(0, int(self.input_size * 10 / 100)) elif basesize_ratio_range[0] == 0.15: # SSD512 VOC min_sizes.insert(0, int(self.input_size * 7 / 100)) max_sizes.insert(0, int(self.input_size * 15 / 100)) else: raise ValueError( 'When not setting min_sizes and max_sizes,' 'basesize_ratio_range[0] should be either 0.1' 'or 0.15 when input_size is 512, got' f' {basesize_ratio_range[0]}.') else: raise ValueError( 'Only support 300 or 512 in SSDAnchorGenerator when ' 'not setting min_sizes and max_sizes, ' f'got {self.input_size}.') assert len(min_sizes) == len(max_sizes) == len(strides) anchor_ratios = [] anchor_scales = [] for k in range(len(self.strides)): scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])] anchor_ratio = [1.] for r in ratios[k]: anchor_ratio += [1 / r, r] # 4 or 6 ratio anchor_ratios.append(torch.Tensor(anchor_ratio)) anchor_scales.append(torch.Tensor(scales)) self.base_sizes = min_sizes self.scales = anchor_scales self.ratios = anchor_ratios self.scale_major = scale_major self.center_offset = 0 self.base_anchors = self.gen_base_anchors() def gen_base_anchors(self): """Generate base anchors. Returns: list(torch.Tensor): Base anchors of a feature grid in multiple \ feature levels. 
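The hard-coded size derivation in `SSDAnchorGenerator.__init__` above reproduces the canonical SSD300-COCO anchor sizes. Worked through for `basesize_ratio_range=(0.15, 0.9)`, six levels and `input_size=300`:

```python
import numpy as np

input_size, num_levels = 300, 6
min_ratio, max_ratio = 15, 90                # (0.15, 0.9) scaled by 100
step = int(np.floor(max_ratio - min_ratio) / (num_levels - 2))   # 18
ratios = range(min_ratio, max_ratio + 1, step)                   # 15,33,51,69,87
min_sizes = [int(input_size * r / 100) for r in ratios]
max_sizes = [int(input_size * (r + step) / 100) for r in ratios]
min_sizes.insert(0, int(input_size * 7 / 100))   # extra first level for COCO
max_sizes.insert(0, int(input_size * 15 / 100))
assert min_sizes == [21, 45, 99, 153, 207, 261]
assert max_sizes == [45, 99, 153, 207, 261, 315]
```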
""" multi_level_base_anchors = [] for i, base_size in enumerate(self.base_sizes): base_anchors = self.gen_single_level_base_anchors( base_size, scales=self.scales[i], ratios=self.ratios[i], center=self.centers[i]) indices = list(range(len(self.ratios[i]))) indices.insert(1, len(indices)) base_anchors = torch.index_select(base_anchors, 0, torch.LongTensor(indices)) multi_level_base_anchors.append(base_anchors) return multi_level_base_anchors def __repr__(self): """str: a string that describes the module""" indent_str = ' ' repr_str = self.__class__.__name__ + '(\n' repr_str += f'{indent_str}strides={self.strides},\n' repr_str += f'{indent_str}scales={self.scales},\n' repr_str += f'{indent_str}scale_major={self.scale_major},\n' repr_str += f'{indent_str}input_size={self.input_size},\n' repr_str += f'{indent_str}scales={self.scales},\n' repr_str += f'{indent_str}ratios={self.ratios},\n' repr_str += f'{indent_str}num_levels={self.num_levels},\n' repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' repr_str += f'{indent_str}basesize_ratio_range=' repr_str += f'{self.basesize_ratio_range})' return repr_str @PRIOR_GENERATORS.register_module() class LegacyAnchorGenerator(AnchorGenerator): """Legacy anchor generator used in MMDetection V1.x. Note: Difference to the V2.0 anchor generator: 1. The center offset of V1.x anchors are set to be 0.5 rather than 0. 2. The width/height are minused by 1 when calculating the anchors' \ centers and corners to meet the V1.x coordinate system. 3. The anchors' corners are quantized. Args: strides (list[int] | list[tuple[int]]): Strides of anchors in multiple feature levels. ratios (list[float]): The list of ratios between the height and width of anchors in a single level. scales (list[int] | None): Anchor scales for anchors in a single level. It cannot be set at the same time if `octave_base_scale` and `scales_per_octave` are set. base_sizes (list[int]): The basic sizes of anchors in multiple levels. If None is given, strides will be used to generate base_sizes. scale_major (bool): Whether to multiply scales first when generating base anchors. If true, the anchors in the same row will have the same scales. By default it is True in V2.0 octave_base_scale (int): The base scale of octave. scales_per_octave (int): Number of scales for each octave. `octave_base_scale` and `scales_per_octave` are usually used in retinanet and the `scales` should be None when they are set. centers (list[tuple[float, float]] | None): The centers of the anchor relative to the feature grid center in multiple feature levels. By default it is set to be None and not used. It a list of float is given, this list will be used to shift the centers of anchors. center_offset (float): The offset of center in proportion to anchors' width and height. By default it is 0.5 in V2.0 but it should be 0.5 in v1.x models. Examples: >>> from mmdet.core import LegacyAnchorGenerator >>> self = LegacyAnchorGenerator( >>> [16], [1.], [1.], [9], center_offset=0.5) >>> all_anchors = self.grid_anchors(((2, 2),), device='cpu') >>> print(all_anchors) [tensor([[ 0., 0., 8., 8.], [16., 0., 24., 8.], [ 0., 16., 8., 24.], [16., 16., 24., 24.]])] """ def gen_single_level_base_anchors(self, base_size, scales, ratios, center=None): """Generate base anchors of a single level. Note: The width/height of anchors are minused by 1 when calculating \ the centers and corners to meet the V1.x coordinate system. Args: base_size (int | float): Basic size of an anchor. scales (torch.Tensor): Scales of the anchor. 
ratios (torch.Tensor): The ratio between between the height. and width of anchors in a single level. center (tuple[float], optional): The center of the base anchor related to a single feature grid. Defaults to None. Returns: torch.Tensor: Anchors in a single-level feature map. """ w = base_size h = base_size if center is None: x_center = self.center_offset * (w - 1) y_center = self.center_offset * (h - 1) else: x_center, y_center = center h_ratios = torch.sqrt(ratios) w_ratios = 1 / h_ratios if self.scale_major: ws = (w * w_ratios[:, None] * scales[None, :]).view(-1) hs = (h * h_ratios[:, None] * scales[None, :]).view(-1) else: ws = (w * scales[:, None] * w_ratios[None, :]).view(-1) hs = (h * scales[:, None] * h_ratios[None, :]).view(-1) # use float anchor and the anchor's center is aligned with the # pixel center base_anchors = [ x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1), x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1) ] base_anchors = torch.stack(base_anchors, dim=-1).round() return base_anchors @PRIOR_GENERATORS.register_module() class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator): """Legacy anchor generator used in MMDetection V1.x. The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator` can be found in `LegacyAnchorGenerator`. """ def __init__(self, strides, ratios, basesize_ratio_range, input_size=300, scale_major=True): super(LegacySSDAnchorGenerator, self).__init__( strides=strides, ratios=ratios, basesize_ratio_range=basesize_ratio_range, input_size=input_size, scale_major=scale_major) self.centers = [((stride - 1) / 2., (stride - 1) / 2.) for stride in strides] self.base_anchors = self.gen_base_anchors() @PRIOR_GENERATORS.register_module() class YOLOAnchorGenerator(AnchorGenerator): """Anchor generator for YOLO. Args: strides (list[int] | list[tuple[int, int]]): Strides of anchors in multiple feature levels. base_sizes (list[list[tuple[int, int]]]): The basic sizes of anchors in multiple levels. """ def __init__(self, strides, base_sizes): self.strides = [_pair(stride) for stride in strides] self.centers = [(stride[0] / 2., stride[1] / 2.) for stride in self.strides] self.base_sizes = [] num_anchor_per_level = len(base_sizes[0]) for base_sizes_per_level in base_sizes: assert num_anchor_per_level == len(base_sizes_per_level) self.base_sizes.append( [_pair(base_size) for base_size in base_sizes_per_level]) self.base_anchors = self.gen_base_anchors() @property def num_levels(self): """int: number of feature levels that the generator will be applied""" return len(self.base_sizes) def gen_base_anchors(self): """Generate base anchors. Returns: list(torch.Tensor): Base anchors of a feature grid in multiple \ feature levels. """ multi_level_base_anchors = [] for i, base_sizes_per_level in enumerate(self.base_sizes): center = None if self.centers is not None: center = self.centers[i] multi_level_base_anchors.append( self.gen_single_level_base_anchors(base_sizes_per_level, center)) return multi_level_base_anchors def gen_single_level_base_anchors(self, base_sizes_per_level, center=None): """Generate base anchors of a single level. Args: base_sizes_per_level (list[tuple[int, int]]): Basic sizes of anchors. center (tuple[float], optional): The center of the base anchor related to a single feature grid. Defaults to None. Returns: torch.Tensor: Anchors in a single-level feature maps. 
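In contrast to the legacy generator above, the base anchors built by `YOLOAnchorGenerator.gen_single_level_base_anchors` are plain `(w, h)` priors centred on the grid-cell centre. One of the standard YOLOv3 priors at stride 32, worked by hand:

```python
import torch

# A standard YOLOv3 prior (w, h) = (116, 90) at stride 32: the cell
# centre is (16, 16), giving corners (x1, y1, x2, y2) as below.
x_c, y_c, w, h = 16., 16., 116., 90.
base_anchor = torch.Tensor(
    [x_c - 0.5 * w, y_c - 0.5 * h, x_c + 0.5 * w, y_c + 0.5 * h])
assert base_anchor.tolist() == [-42.0, -29.0, 74.0, 61.0]
```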
""" x_center, y_center = center base_anchors = [] for base_size in base_sizes_per_level: w, h = base_size # use float anchor and the anchor's center is aligned with the # pixel center base_anchor = torch.Tensor([ x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w, y_center + 0.5 * h ]) base_anchors.append(base_anchor) base_anchors = torch.stack(base_anchors, dim=0) return base_anchors def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'): """Generate responsible anchor flags of grid cells in multiple scales. Args: featmap_sizes (list(tuple)): List of feature map sizes in multiple feature levels. gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). device (str): Device where the anchors will be put on. Return: list(torch.Tensor): responsible flags of anchors in multiple level """ assert self.num_levels == len(featmap_sizes) multi_level_responsible_flags = [] for i in range(self.num_levels): anchor_stride = self.strides[i] flags = self.single_level_responsible_flags( featmap_sizes[i], gt_bboxes, anchor_stride, self.num_base_anchors[i], device=device) multi_level_responsible_flags.append(flags) return multi_level_responsible_flags def single_level_responsible_flags(self, featmap_size, gt_bboxes, stride, num_base_anchors, device='cuda'): """Generate the responsible flags of anchor in a single feature map. Args: featmap_size (tuple[int]): The size of feature maps. gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). stride (tuple(int)): stride of current level num_base_anchors (int): The number of base anchors. device (str, optional): Device where the flags will be put on. Defaults to 'cuda'. Returns: torch.Tensor: The valid flags of each anchor in a single level \ feature map. """ feat_h, feat_w = featmap_size gt_bboxes_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device) gt_bboxes_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device) gt_bboxes_grid_x = torch.floor(gt_bboxes_cx / stride[0]).long() gt_bboxes_grid_y = torch.floor(gt_bboxes_cy / stride[1]).long() # row major indexing gt_bboxes_grid_idx = gt_bboxes_grid_y * feat_w + gt_bboxes_grid_x responsible_grid = torch.zeros( feat_h * feat_w, dtype=torch.uint8, device=device) responsible_grid[gt_bboxes_grid_idx] = 1 responsible_grid = responsible_grid[:, None].expand( responsible_grid.size(0), num_base_anchors).contiguous().view(-1) return responsible_grid ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/anchor/builder.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings from mmcv.utils import Registry, build_from_cfg PRIOR_GENERATORS = Registry('Generator for anchors and points') ANCHOR_GENERATORS = PRIOR_GENERATORS def build_prior_generator(cfg, default_args=None): return build_from_cfg(cfg, PRIOR_GENERATORS, default_args) def build_anchor_generator(cfg, default_args=None): warnings.warn( '``build_anchor_generator`` would be deprecated soon, please use ' '``build_prior_generator`` ') return build_prior_generator(cfg, default_args=default_args) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/anchor/point_generator.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
import numpy as np import torch from torch.nn.modules.utils import _pair from .builder import PRIOR_GENERATORS @PRIOR_GENERATORS.register_module() class PointGenerator: def _meshgrid(self, x, y, row_major=True): xx = x.repeat(len(y)) yy = y.view(-1, 1).repeat(1, len(x)).view(-1) if row_major: return xx, yy else: return yy, xx def grid_points(self, featmap_size, stride=16, device='cuda'): feat_h, feat_w = featmap_size shift_x = torch.arange(0., feat_w, device=device) * stride shift_y = torch.arange(0., feat_h, device=device) * stride shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) stride = shift_x.new_full((shift_xx.shape[0], ), stride) shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1) all_points = shifts.to(device) return all_points def valid_flags(self, featmap_size, valid_size, device='cuda'): feat_h, feat_w = featmap_size valid_h, valid_w = valid_size assert valid_h <= feat_h and valid_w <= feat_w valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) valid_x[:valid_w] = 1 valid_y[:valid_h] = 1 valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) valid = valid_xx & valid_yy return valid @PRIOR_GENERATORS.register_module() class MlvlPointGenerator: """Standard points generator for multi-level (Mlvl) feature maps in 2D points-based detectors. Args: strides (list[int] | list[tuple[int, int]]): Strides of anchors in multiple feature levels in order (w, h). offset (float): The offset of points, the value is normalized with corresponding stride. Defaults to 0.5. """ def __init__(self, strides, offset=0.5): self.strides = [_pair(stride) for stride in strides] self.offset = offset @property def num_levels(self): """int: number of feature levels that the generator will be applied""" return len(self.strides) @property def num_base_priors(self): """list[int]: The number of priors (points) at a point on the feature grid""" return [1 for _ in range(len(self.strides))] def _meshgrid(self, x, y, row_major=True): yy, xx = torch.meshgrid(y, x) if row_major: # warning .flatten() would cause error in ONNX exporting # have to use reshape here return xx.reshape(-1), yy.reshape(-1) else: return yy.reshape(-1), xx.reshape(-1) def grid_priors(self, featmap_sizes, dtype=torch.float32, device='cuda', with_stride=False): """Generate grid points of multiple feature levels. Args: featmap_sizes (list[tuple]): List of feature map sizes in multiple feature levels, each size arrange as as (h, w). dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32. device (str): The device where the anchors will be put on. with_stride (bool): Whether to concatenate the stride to the last dimension of points. Return: list[torch.Tensor]: Points of multiple feature levels. The sizes of each tensor should be (N, 2) when with stride is ``False``, where N = width * height, width and height are the sizes of the corresponding feature level, and the last dimension 2 represent (coord_x, coord_y), otherwise the shape should be (N, 4), and the last dimension 4 represent (coord_x, coord_y, stride_w, stride_h). """ assert self.num_levels == len(featmap_sizes) multi_level_priors = [] for i in range(self.num_levels): priors = self.single_level_grid_priors( featmap_sizes[i], level_idx=i, dtype=dtype, device=device, with_stride=with_stride) multi_level_priors.append(priors) return multi_level_priors def single_level_grid_priors(self, featmap_size, level_idx, dtype=torch.float32, device='cuda', with_stride=False): """Generate grid Points of a single level. 
Note: This function is usually called by method ``self.grid_priors``. Args: featmap_size (tuple[int]): Size of the feature maps, arrange as (h, w). level_idx (int): The index of corresponding feature map level. dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32. device (str, optional): The device the tensor will be put on. Defaults to 'cuda'. with_stride (bool): Concatenate the stride to the last dimension of points. Return: Tensor: Points of single feature levels. The shape of tensor should be (N, 2) when with stride is ``False``, where N = width * height, width and height are the sizes of the corresponding feature level, and the last dimension 2 represent (coord_x, coord_y), otherwise the shape should be (N, 4), and the last dimension 4 represent (coord_x, coord_y, stride_w, stride_h). """ feat_h, feat_w = featmap_size stride_w, stride_h = self.strides[level_idx] shift_x = (torch.arange(0, feat_w, device=device) + self.offset) * stride_w # keep featmap_size as Tensor instead of int, so that we # can convert to ONNX correctly shift_x = shift_x.to(dtype) shift_y = (torch.arange(0, feat_h, device=device) + self.offset) * stride_h # keep featmap_size as Tensor instead of int, so that we # can convert to ONNX correctly shift_y = shift_y.to(dtype) shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) if not with_stride: shifts = torch.stack([shift_xx, shift_yy], dim=-1) else: # use `shape[0]` instead of `len(shift_xx)` for ONNX export stride_w = shift_xx.new_full((shift_xx.shape[0], ), stride_w).to(dtype) stride_h = shift_xx.new_full((shift_yy.shape[0], ), stride_h).to(dtype) shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], dim=-1) all_points = shifts.to(device) return all_points def valid_flags(self, featmap_sizes, pad_shape, device='cuda'): """Generate valid flags of points of multiple feature levels. Args: featmap_sizes (list(tuple)): List of feature map sizes in multiple feature levels, each size arrange as as (h, w). pad_shape (tuple(int)): The padded shape of the image, arrange as (h, w). device (str): The device where the anchors will be put on. Return: list(torch.Tensor): Valid flags of points of multiple levels. """ assert self.num_levels == len(featmap_sizes) multi_level_flags = [] for i in range(self.num_levels): point_stride = self.strides[i] feat_h, feat_w = featmap_sizes[i] h, w = pad_shape[:2] valid_feat_h = min(int(np.ceil(h / point_stride[1])), feat_h) valid_feat_w = min(int(np.ceil(w / point_stride[0])), feat_w) flags = self.single_level_valid_flags((feat_h, feat_w), (valid_feat_h, valid_feat_w), device=device) multi_level_flags.append(flags) return multi_level_flags def single_level_valid_flags(self, featmap_size, valid_size, device='cuda'): """Generate the valid flags of points of a single feature map. Args: featmap_size (tuple[int]): The size of feature maps, arrange as as (h, w). valid_size (tuple[int]): The valid size of the feature maps. The size arrange as as (h, w). device (str, optional): The device where the flags will be put on. Defaults to 'cuda'. Returns: torch.Tensor: The valid flags of each points in a single level \ feature map. 
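Example:
    >>> # A small sketch: a 2x2 level where only the first row is valid.
    >>> gen = MlvlPointGenerator(strides=[16])
    >>> gen.single_level_valid_flags((2, 2), (1, 2), device='cpu')
    tensor([ True,  True, False, False])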
""" feat_h, feat_w = featmap_size valid_h, valid_w = valid_size assert valid_h <= feat_h and valid_w <= feat_w valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) valid_x[:valid_w] = 1 valid_y[:valid_h] = 1 valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) valid = valid_xx & valid_yy return valid def sparse_priors(self, prior_idxs, featmap_size, level_idx, dtype=torch.float32, device='cuda'): """Generate sparse points according to the ``prior_idxs``. Args: prior_idxs (Tensor): The index of corresponding anchors in the feature map. featmap_size (tuple[int]): feature map size arrange as (w, h). level_idx (int): The level index of corresponding feature map. dtype (obj:`torch.dtype`): Date type of points. Defaults to ``torch.float32``. device (obj:`torch.device`): The device where the points is located. Returns: Tensor: Anchor with shape (N, 2), N should be equal to the length of ``prior_idxs``. And last dimension 2 represent (coord_x, coord_y). """ height, width = featmap_size x = (prior_idxs % width + self.offset) * self.strides[level_idx][0] y = ((prior_idxs // width) % height + self.offset) * self.strides[level_idx][1] prioris = torch.stack([x, y], 1).to(dtype) prioris = prioris.to(device) return prioris ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/anchor/utils.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch def images_to_levels(target, num_levels): """Convert targets by image to targets by feature level. [target_img0, target_img1] -> [target_level0, target_level1, ...] """ target = torch.stack(target, 0) level_targets = [] start = 0 for n in num_levels: end = start + n # level_targets.append(target[:, start:end].squeeze(0)) level_targets.append(target[:, start:end]) start = end return level_targets def anchor_inside_flags(flat_anchors, valid_flags, img_shape, allowed_border=0): """Check whether the anchors are inside the border. Args: flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4). valid_flags (torch.Tensor): An existing valid flags of anchors. img_shape (tuple(int)): Shape of current image. allowed_border (int, optional): The border to allow the valid anchor. Defaults to 0. Returns: torch.Tensor: Flags indicating whether the anchors are inside a \ valid range. """ img_h, img_w = img_shape[:2] if allowed_border >= 0: inside_flags = valid_flags & \ (flat_anchors[:, 0] >= -allowed_border) & \ (flat_anchors[:, 1] >= -allowed_border) & \ (flat_anchors[:, 2] < img_w + allowed_border) & \ (flat_anchors[:, 3] < img_h + allowed_border) else: inside_flags = valid_flags return inside_flags def calc_region(bbox, ratio, featmap_size=None): """Calculate a proportional bbox region. The bbox center are fixed and the new h' and w' is h * ratio and w * ratio. Args: bbox (Tensor): Bboxes to calculate regions, shape (n, 4). ratio (float): Ratio of the output region. featmap_size (tuple): Feature map size used for clipping the boundary. 
Returns: tuple: x1, y1, x2, y2 """ x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long() y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long() x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long() y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long() if featmap_size is not None: x1 = x1.clamp(min=0, max=featmap_size[1]) y1 = y1.clamp(min=0, max=featmap_size[0]) x2 = x2.clamp(min=0, max=featmap_size[1]) y2 = y2.clamp(min=0, max=featmap_size[0]) return (x1, y1, x2, y2) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner, MaxIoUAssigner, RegionAssigner) from .builder import build_assigner, build_bbox_coder, build_sampler from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxCoder, PseudoBBoxCoder, TBLRBBoxCoder) from .iou_calculators import BboxOverlaps2D, bbox_overlaps from .samplers import (BaseSampler, CombinedSampler, InstanceBalancedPosSampler, IoUBalancedNegSampler, OHEMSampler, PseudoSampler, RandomSampler, SamplingResult, ScoreHLRSampler) from .transforms import (bbox2distance, bbox2result, bbox2roi, bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping, bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh, distance2bbox, find_inside_bboxes, roi2bbox) __all__ = [ 'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler', 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner', 'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance', 'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'DistancePointBBoxCoder', 'CenterRegionAssigner', 'bbox_rescale', 'bbox_cxcywh_to_xyxy', 'bbox_xyxy_to_cxcywh', 'RegionAssigner', 'find_inside_bboxes' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .approx_max_iou_assigner import ApproxMaxIoUAssigner from .assign_result import AssignResult from .atss_assigner import ATSSAssigner from .base_assigner import BaseAssigner from .center_region_assigner import CenterRegionAssigner from .grid_assigner import GridAssigner from .hungarian_assigner import HungarianAssigner from .mask_hungarian_assigner import MaskHungarianAssigner from .max_iou_assigner import MaxIoUAssigner from .point_assigner import PointAssigner from .region_assigner import RegionAssigner from .sim_ota_assigner import SimOTAAssigner from .task_aligned_assigner import TaskAlignedAssigner from .uniform_assigner import UniformAssigner __all__ = [ 'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult', 'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner', 'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner', 'TaskAlignedAssigner', 'MaskHungarianAssigner' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/approx_max_iou_assigner.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
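# Assigners in this package are normally built from config dicts through the
# BBOX_ASSIGNERS registry; a minimal sketch (thresholds are illustrative):
#
# >>> from mmdet.core.bbox import build_assigner
# >>> assigner = build_assigner(
# ...     dict(type='ApproxMaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3))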
import torch from ..builder import BBOX_ASSIGNERS from ..iou_calculators import build_iou_calculator from .max_iou_assigner import MaxIoUAssigner @BBOX_ASSIGNERS.register_module() class ApproxMaxIoUAssigner(MaxIoUAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposal will be assigned an integer indicating the ground truth index. (semi-positive index: gt index (0-based), -1: background) - -1: negative sample, no assigned gt - semi-positive integer: positive sample, index (0-based) of assigned gt Args: pos_iou_thr (float): IoU threshold for positive bboxes. neg_iou_thr (float or tuple): IoU threshold for negative bboxes. min_pos_iou (float): Minimum iou for a bbox to be considered as a positive bbox. Positive samples can have smaller IoU than pos_iou_thr due to the last step (assign max IoU sample to each gt). gt_max_assign_all (bool): Whether to assign all bboxes with the same highest overlap with some gt to that gt. ignore_iof_thr (float): IoF threshold for ignoring bboxes (if `gt_bboxes_ignore` is specified). Negative values mean not ignoring any bboxes. ignore_wrt_candidates (bool): Whether to compute the iof between `bboxes` and `gt_bboxes_ignore`, or the contrary. match_low_quality (bool): Whether to allow low quality matches. This is usually allowed for RPN and single stage detectors, but not allowed in the second stage. gpu_assign_thr (int): The upper bound of the number of GT for GPU assign. When the number of gt is above this threshold, will assign on CPU device. Negative values mean not assign on CPU. """ def __init__(self, pos_iou_thr, neg_iou_thr, min_pos_iou=.0, gt_max_assign_all=True, ignore_iof_thr=-1, ignore_wrt_candidates=True, match_low_quality=True, gpu_assign_thr=-1, iou_calculator=dict(type='BboxOverlaps2D')): self.pos_iou_thr = pos_iou_thr self.neg_iou_thr = neg_iou_thr self.min_pos_iou = min_pos_iou self.gt_max_assign_all = gt_max_assign_all self.ignore_iof_thr = ignore_iof_thr self.ignore_wrt_candidates = ignore_wrt_candidates self.gpu_assign_thr = gpu_assign_thr self.match_low_quality = match_low_quality self.iou_calculator = build_iou_calculator(iou_calculator) def assign(self, approxs, squares, approxs_per_octave, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): """Assign gt to approxs. This method assigns a gt bbox to each group of approxs (bboxes); each group of approxs is represented by a base approx (bbox) and will be assigned with -1, or a semi-positive number. background_label (-1) means negative sample, semi-positive number is the index (0-based) of assigned gt. The assignment is done in the following steps, the order matters. 1. assign every bbox to background_label (-1) 2. use the max IoU of each group of approxs to assign 3. assign proposals whose iou with all gts < neg_iou_thr to background 4. for each bbox, if the iou with its nearest gt >= pos_iou_thr, assign it to that bbox 5. for each gt bbox, assign its nearest proposals (may be more than one) to itself Args: approxs (Tensor): Bounding boxes to be assigned, shape(approxs_per_octave*n, 4). squares (Tensor): Base Bounding boxes to be assigned, shape(n, 4). approxs_per_octave (int): number of approxs per octave gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). Returns: :obj:`AssignResult`: The assign result.
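Example:
    >>> # A minimal sketch with one approx per octave (illustrative boxes).
    >>> self = ApproxMaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.4)
    >>> approxs = torch.FloatTensor([[0, 0, 10, 10]])
    >>> squares = torch.FloatTensor([[0, 0, 10, 10]])
    >>> gt_bboxes = torch.FloatTensor([[0, 0, 10, 9]])
    >>> assign_result = self.assign(approxs, squares, 1, gt_bboxes)
    >>> assign_result.gt_inds
    tensor([1])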
""" num_squares = squares.size(0) num_gts = gt_bboxes.size(0) if num_squares == 0 or num_gts == 0: # No predictions and/or truth, return empty assignment overlaps = approxs.new(num_gts, num_squares) assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) return assign_result # re-organize anchors by approxs_per_octave x num_squares approxs = torch.transpose( approxs.view(num_squares, approxs_per_octave, 4), 0, 1).contiguous().view(-1, 4) assign_on_cpu = True if (self.gpu_assign_thr > 0) and ( num_gts > self.gpu_assign_thr) else False # compute overlap and assign gt on CPU when number of GT is large if assign_on_cpu: device = approxs.device approxs = approxs.cpu() gt_bboxes = gt_bboxes.cpu() if gt_bboxes_ignore is not None: gt_bboxes_ignore = gt_bboxes_ignore.cpu() if gt_labels is not None: gt_labels = gt_labels.cpu() all_overlaps = self.iou_calculator(approxs, gt_bboxes) overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares, num_gts).max(dim=0) overlaps = torch.transpose(overlaps, 0, 1) if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0 and squares.numel() > 0): if self.ignore_wrt_candidates: ignore_overlaps = self.iou_calculator( squares, gt_bboxes_ignore, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) else: ignore_overlaps = self.iou_calculator( gt_bboxes_ignore, squares, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=0) overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1 assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) if assign_on_cpu: assign_result.gt_inds = assign_result.gt_inds.to(device) assign_result.max_overlaps = assign_result.max_overlaps.to(device) if assign_result.labels is not None: assign_result.labels = assign_result.labels.to(device) return assign_result ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/assign_result.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.utils import util_mixins class AssignResult(util_mixins.NiceRepr): """Stores assignments between predicted and truth boxes. Attributes: num_gts (int): the number of truth boxes considered when computing this assignment gt_inds (LongTensor): for each predicted box indicates the 1-based index of the assigned truth box. 0 means unassigned and -1 means ignore. max_overlaps (FloatTensor): the iou between the predicted box and its assigned truth box. labels (None | LongTensor): If specified, for each predicted box indicates the category label of the assigned truth box. Example: >>> # An assign result between 4 predicted boxes and 9 true boxes >>> # where only two boxes were assigned. 
>>> num_gts = 9 >>> max_overlaps = torch.FloatTensor([0, .5, .9, 0]) >>> gt_inds = torch.LongTensor([-1, 1, 2, 0]) >>> labels = torch.LongTensor([0, 3, 4, 0]) >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels) >>> print(str(self)) # xdoctest: +IGNORE_WANT >>> # Force addition of gt labels (when adding gt as proposals) >>> new_labels = torch.LongTensor([3, 4, 5]) >>> self.add_gt_(new_labels) >>> print(str(self)) # xdoctest: +IGNORE_WANT """ def __init__(self, num_gts, gt_inds, max_overlaps, labels=None): self.num_gts = num_gts self.gt_inds = gt_inds self.max_overlaps = max_overlaps self.labels = labels # Interface for possible user-defined properties self._extra_properties = {} @property def num_preds(self): """int: the number of predictions in this assignment""" return len(self.gt_inds) def set_extra_property(self, key, value): """Set user-defined new property.""" assert key not in self.info self._extra_properties[key] = value def get_extra_property(self, key): """Get user-defined property.""" return self._extra_properties.get(key, None) @property def info(self): """dict: a dictionary of info about the object""" basic_info = { 'num_gts': self.num_gts, 'num_preds': self.num_preds, 'gt_inds': self.gt_inds, 'max_overlaps': self.max_overlaps, 'labels': self.labels, } basic_info.update(self._extra_properties) return basic_info def __nice__(self): """str: a "nice" summary string describing this assign result""" parts = [] parts.append(f'num_gts={self.num_gts!r}') if self.gt_inds is None: parts.append(f'gt_inds={self.gt_inds!r}') else: parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}') if self.max_overlaps is None: parts.append(f'max_overlaps={self.max_overlaps!r}') else: parts.append('max_overlaps.shape=' f'{tuple(self.max_overlaps.shape)!r}') if self.labels is None: parts.append(f'labels={self.labels!r}') else: parts.append(f'labels.shape={tuple(self.labels.shape)!r}') return ', '.join(parts) @classmethod def random(cls, **kwargs): """Create random AssignResult for tests or debugging. Args: num_preds: number of predicted boxes num_gts: number of true boxes p_ignore (float): probability of a predicted box assigned to an ignored truth p_assigned (float): probability of a predicted box being assigned p_use_label (float | bool): with labels or not rng (None | int | numpy.random.RandomState): seed or state Returns: :obj:`AssignResult`: Randomly generated assign results.
Example: >>> from mmdet.core.bbox.assigners.assign_result import * # NOQA >>> self = AssignResult.random() >>> print(self.info) """ from mmdet.core.bbox import demodata rng = demodata.ensure_rng(kwargs.get('rng', None)) num_gts = kwargs.get('num_gts', None) num_preds = kwargs.get('num_preds', None) p_ignore = kwargs.get('p_ignore', 0.3) p_assigned = kwargs.get('p_assigned', 0.7) p_use_label = kwargs.get('p_use_label', 0.5) num_classes = kwargs.get('num_classes', 3) if num_gts is None: num_gts = rng.randint(0, 8) if num_preds is None: num_preds = rng.randint(0, 16) if num_gts == 0: max_overlaps = torch.zeros(num_preds, dtype=torch.float32) gt_inds = torch.zeros(num_preds, dtype=torch.int64) if p_use_label is True or p_use_label < rng.rand(): labels = torch.zeros(num_preds, dtype=torch.int64) else: labels = None else: import numpy as np # Create an overlap for each predicted box max_overlaps = torch.from_numpy(rng.rand(num_preds)) # Construct gt_inds for each predicted box is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned) # maximum number of assignments constraints n_assigned = min(num_preds, min(num_gts, is_assigned.sum())) assigned_idxs = np.where(is_assigned)[0] rng.shuffle(assigned_idxs) assigned_idxs = assigned_idxs[0:n_assigned] assigned_idxs.sort() is_assigned[:] = 0 is_assigned[assigned_idxs] = True is_ignore = torch.from_numpy( rng.rand(num_preds) < p_ignore) & is_assigned gt_inds = torch.zeros(num_preds, dtype=torch.int64) true_idxs = np.arange(num_gts) rng.shuffle(true_idxs) true_idxs = torch.from_numpy(true_idxs) gt_inds[is_assigned] = true_idxs[:n_assigned].long() gt_inds = torch.from_numpy( rng.randint(1, num_gts + 1, size=num_preds)) gt_inds[is_ignore] = -1 gt_inds[~is_assigned] = 0 max_overlaps[~is_assigned] = 0 if p_use_label is True or p_use_label < rng.rand(): if num_classes == 0: labels = torch.zeros(num_preds, dtype=torch.int64) else: labels = torch.from_numpy( # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class rng.randint(0, num_classes, size=num_preds)) labels[~is_assigned] = 0 else: labels = None self = cls(num_gts, gt_inds, max_overlaps, labels) return self def add_gt_(self, gt_labels): """Add ground truth as assigned results. Args: gt_labels (torch.Tensor): Labels of gt boxes """ self_inds = torch.arange( 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device) self.gt_inds = torch.cat([self_inds, self.gt_inds]) self.max_overlaps = torch.cat( [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps]) if self.labels is not None: self.labels = torch.cat([gt_labels, self.labels]) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/atss_assigner.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings import torch from ..builder import BBOX_ASSIGNERS from ..iou_calculators import build_iou_calculator from .assign_result import AssignResult from .base_assigner import BaseAssigner @BBOX_ASSIGNERS.register_module() class ATSSAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposal will be assigned `0` or a positive integer indicating the ground truth index. - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt If ``alpha`` is not None, it means that the dynamic cost ATSSAssigner is adopted, which is currently only used in the DDOD.
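In brief, for each gt it selects the top-k anchors whose centers are closest to the gt center on every pyramid level, and adapts the positive IoU threshold to the mean plus standard deviation of those candidates' IoUs (see ``assign`` below for the exact steps).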
Args: topk (int): number of bbox selected in each level. alpha (float, optional): param of cost rate for each proposal only in DDOD. Default None. iou_calculator (dict): builder of IoU calculator. Default dict(type='BboxOverlaps2D'). ignore_iof_thr (float): IoF threshold for ignoring bboxes. Negative values mean not ignoring any bboxes. Default -1. """ def __init__(self, topk, alpha=None, iou_calculator=dict(type='BboxOverlaps2D'), ignore_iof_thr=-1): self.topk = topk self.alpha = alpha self.iou_calculator = build_iou_calculator(iou_calculator) self.ignore_iof_thr = ignore_iof_thr # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py def assign(self, bboxes, num_level_bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None, cls_scores=None, bbox_preds=None): """Assign gt to bboxes. The assignment is done in the following steps 1. compute iou between all bbox (bbox of all pyramid levels) and gt 2. compute center distance between all bbox and gt 3. on each pyramid level, for each gt, select k bbox whose center are closest to the gt center, so we select k*l bbox in total as candidates for each gt 4. get corresponding iou for these candidates, and compute the mean and std, set mean + std as the iou threshold 5. select these candidates whose iou are greater than or equal to the threshold as positive 6. limit the positive sample's center in gt If ``alpha`` is not None, and ``cls_scores`` and ``bbox_preds`` are not None, the overlaps calculation in the first step will also include dynamic cost, which is currently only used in the DDOD. Args: bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). num_level_bboxes (List): num of bboxes in each level gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. Default None. gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. Default None. bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. Default None. Returns: :obj:`AssignResult`: The assign result. """ INF = 100000000 bboxes = bboxes[:, :4] num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0) message = 'Invalid alpha parameter because cls_scores or ' \ 'bbox_preds are None. If you want to use the ' \ 'cost-based ATSSAssigner, please set cls_scores, ' \ 'bbox_preds and self.alpha at the same time. 
' if self.alpha is None: # ATSSAssigner overlaps = self.iou_calculator(bboxes, gt_bboxes) if cls_scores is not None or bbox_preds is not None: warnings.warn(message) else: # Dynamic cost ATSSAssigner in DDOD assert cls_scores is not None and bbox_preds is not None, message # compute cls cost for bbox and GT cls_cost = torch.sigmoid(cls_scores[:, gt_labels]) # compute iou between all bbox and gt overlaps = self.iou_calculator(bbox_preds, gt_bboxes) # make sure that we are in element-wise multiplication assert cls_cost.shape == overlaps.shape # overlaps is actually a cost matrix overlaps = cls_cost**(1 - self.alpha) * overlaps**self.alpha # assign 0 by default assigned_gt_inds = overlaps.new_full((num_bboxes, ), 0, dtype=torch.long) if num_gt == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = overlaps.new_zeros((num_bboxes, )) if num_gt == 0: # No truth, assign everything to background assigned_gt_inds[:] = 0 if gt_labels is None: assigned_labels = None else: assigned_labels = overlaps.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) # compute center distance between all bbox and gt gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0 gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0 gt_points = torch.stack((gt_cx, gt_cy), dim=1) bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0 bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0 bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1) distances = (bboxes_points[:, None, :] - gt_points[None, :, :]).pow(2).sum(-1).sqrt() if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0): ignore_overlaps = self.iou_calculator( bboxes, gt_bboxes_ignore, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr distances[ignore_idxs, :] = INF assigned_gt_inds[ignore_idxs] = -1 # Selecting candidates based on the center distance candidate_idxs = [] start_idx = 0 for level, bboxes_per_level in enumerate(num_level_bboxes): # on each pyramid level, for each gt, # select k bbox whose center are closest to the gt center end_idx = start_idx + bboxes_per_level distances_per_level = distances[start_idx:end_idx, :] selectable_k = min(self.topk, bboxes_per_level) _, topk_idxs_per_level = distances_per_level.topk( selectable_k, dim=0, largest=False) candidate_idxs.append(topk_idxs_per_level + start_idx) start_idx = end_idx candidate_idxs = torch.cat(candidate_idxs, dim=0) # get corresponding iou for the these candidates, and compute the # mean and std, set mean + std as the iou threshold candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)] overlaps_mean_per_gt = candidate_overlaps.mean(0) overlaps_std_per_gt = candidate_overlaps.std(0) overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :] # limit the positive sample's center in gt for gt_idx in range(num_gt): candidate_idxs[:, gt_idx] += gt_idx * num_bboxes ep_bboxes_cx = bboxes_cx.view(1, -1).expand( num_gt, num_bboxes).contiguous().view(-1) ep_bboxes_cy = bboxes_cy.view(1, -1).expand( num_gt, num_bboxes).contiguous().view(-1) candidate_idxs = candidate_idxs.view(-1) # calculate the left, top, right, bottom distance between positive # bbox center and gt side l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0] t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1] r_ 
= gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt) b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt) is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01 is_pos = is_pos & is_in_gts # if an anchor box is assigned to multiple gts, # the one with the highest IoU will be selected. overlaps_inf = torch.full_like(overlaps, -INF).t().contiguous().view(-1) index = candidate_idxs.view(-1)[is_pos.view(-1)] overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index] overlaps_inf = overlaps_inf.view(num_gt, -1).t() max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1) assigned_gt_inds[ max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1 if gt_labels is not None: assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[ assigned_gt_inds[pos_inds] - 1] else: assigned_labels = None return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/base_assigner.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod class BaseAssigner(metaclass=ABCMeta): """Base assigner that assigns boxes to ground truth boxes.""" @abstractmethod def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): """Assign boxes to either a ground truth box or a negative box.""" ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/center_region_assigner.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_ASSIGNERS from ..iou_calculators import build_iou_calculator from .assign_result import AssignResult from .base_assigner import BaseAssigner def scale_boxes(bboxes, scale): """Expand an array of boxes by a given scale. Args: bboxes (Tensor): Shape (m, 4) scale (float): The scale factor of bboxes Returns: (Tensor): Shape (m, 4). Scaled bboxes """ assert bboxes.size(1) == 4 w_half = (bboxes[:, 2] - bboxes[:, 0]) * .5 h_half = (bboxes[:, 3] - bboxes[:, 1]) * .5 x_c = (bboxes[:, 2] + bboxes[:, 0]) * .5 y_c = (bboxes[:, 3] + bboxes[:, 1]) * .5 w_half *= scale h_half *= scale boxes_scaled = torch.zeros_like(bboxes) boxes_scaled[:, 0] = x_c - w_half boxes_scaled[:, 2] = x_c + w_half boxes_scaled[:, 1] = y_c - h_half boxes_scaled[:, 3] = y_c + h_half return boxes_scaled def is_located_in(points, bboxes): """Check whether points are located in bboxes. Args: points (Tensor): Points, shape: (m, 2). bboxes (Tensor): Bounding boxes, shape: (n, 4). Return: Tensor: Flags indicating if points are located in bboxes, shape: (m, n). """ assert points.size(1) == 2 assert bboxes.size(1) == 4 return (points[:, 0].unsqueeze(1) > bboxes[:, 0].unsqueeze(0)) & \ (points[:, 0].unsqueeze(1) < bboxes[:, 2].unsqueeze(0)) & \ (points[:, 1].unsqueeze(1) > bboxes[:, 1].unsqueeze(0)) & \ (points[:, 1].unsqueeze(1) < bboxes[:, 3].unsqueeze(0)) def bboxes_area(bboxes): """Compute the area of an array of bboxes. Args: bboxes (Tensor): The coordinates of bboxes. Shape: (m, 4) Returns: Tensor: Area of the bboxes.
Shape: (m, ) """ assert bboxes.size(1) == 4 w = (bboxes[:, 2] - bboxes[:, 0]) h = (bboxes[:, 3] - bboxes[:, 1]) areas = w * h return areas @BBOX_ASSIGNERS.register_module() class CenterRegionAssigner(BaseAssigner): """Assign pixels at the center region of a bbox as positive. Each proposal will be assigned with `-1`, `0`, or a positive integer indicating the ground truth index. - -1: negative samples - semi-positive numbers: positive sample, index (0-based) of assigned gt Args: pos_scale (float): Threshold within which pixels are labelled as positive. neg_scale (float): Threshold above which pixels are labelled as negative. min_pos_iof (float): Minimum iof of a pixel with a gt to be labelled as positive. Default: 1e-2 ignore_gt_scale (float): Threshold within which the pixels are ignored when the gt is labelled as shadowed. Default: 0.5 foreground_dominate (bool): If True, the bbox will be assigned as positive when a gt's kernel region overlaps with another's shadowed (ignored) region, otherwise it is set as ignored. Defaults to False. """ def __init__(self, pos_scale, neg_scale, min_pos_iof=1e-2, ignore_gt_scale=0.5, foreground_dominate=False, iou_calculator=dict(type='BboxOverlaps2D')): self.pos_scale = pos_scale self.neg_scale = neg_scale self.min_pos_iof = min_pos_iof self.ignore_gt_scale = ignore_gt_scale self.foreground_dominate = foreground_dominate self.iou_calculator = build_iou_calculator(iou_calculator) def get_gt_priorities(self, gt_bboxes): """Get gt priorities according to their areas. Smaller gts have higher priority. Args: gt_bboxes (Tensor): Ground truth boxes, shape (k, 4). Returns: Tensor: The priority of gts so that gts with larger priority are \ more likely to be assigned. Shape (k, ) """ gt_areas = bboxes_area(gt_bboxes) # Rank all gt bbox areas. Smaller objects have larger priority _, sort_idx = gt_areas.sort(descending=True) sort_idx = sort_idx.argsort() return sort_idx def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): """Assign gt to bboxes. This method assigns gts to every bbox (proposal/anchor), each bbox \ will be assigned with -1, or a semi-positive number. -1 means \ negative sample, semi-positive number is the index (0-based) of \ assigned gt. Args: bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_bboxes_ignore (tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. gt_labels (tensor, optional): Label of gt_bboxes, shape (num_gts,). Returns: :obj:`AssignResult`: The assigned result. Note that \ shadowed_labels of shape (N, 2) is also added as an \ `assign_result` attribute. `shadowed_labels` is a tensor \ composed of N pairs of [anchor_ind, class_label], where N \ is the number of anchors that lie in the outer region of a \ gt, anchor_ind is the shadowed anchor index and class_label \ is the shadowed class label. Example: >>> self = CenterRegionAssigner(0.2, 0.2) >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]]) >>> gt_bboxes = torch.Tensor([[0, 0, 10, 10]]) >>> assign_result = self.assign(bboxes, gt_bboxes) >>> expected_gt_inds = torch.LongTensor([1, 0]) >>> assert torch.all(assign_result.gt_inds == expected_gt_inds) """ # There are in total 5 steps in the pixel assignment # 1. Find core (the center region, say inner 0.2) # and shadow (the relatively outer part, say inner 0.2-0.5) # regions of every gt. # 2. Find all prior bboxes that lie in gt_core and gt_shadow regions # 3. 
Assign prior bboxes in gt_core with a one-hot id of the gt in # the image. # 3.1. For overlapping objects, the prior bboxes in gt_core is # assigned with the object with smallest area # 4. Assign prior bboxes with class label according to its gt id. # 4.1. Assign -1 to prior bboxes lying in shadowed gts # 4.2. Assign positive prior boxes with the corresponding label # 5. Find pixels lying in the shadow of an object and assign them with # background label, but set the loss weight of its corresponding # gt to zero. assert bboxes.size(1) == 4, 'bboxes must have size of 4' # 1. Find core positive and shadow region of every gt gt_core = scale_boxes(gt_bboxes, self.pos_scale) gt_shadow = scale_boxes(gt_bboxes, self.neg_scale) # 2. Find prior bboxes that lie in gt_core and gt_shadow regions bbox_centers = (bboxes[:, 2:4] + bboxes[:, 0:2]) / 2 # The center points lie within the gt boxes is_bbox_in_gt = is_located_in(bbox_centers, gt_bboxes) # Only calculate bbox and gt_core IoF. This enables small prior bboxes # to match large gts bbox_and_gt_core_overlaps = self.iou_calculator( bboxes, gt_core, mode='iof') # The center point of effective priors should be within the gt box is_bbox_in_gt_core = is_bbox_in_gt & ( bbox_and_gt_core_overlaps > self.min_pos_iof) # shape (n, k) is_bbox_in_gt_shadow = ( self.iou_calculator(bboxes, gt_shadow, mode='iof') > self.min_pos_iof) # Rule out center effective positive pixels is_bbox_in_gt_shadow &= (~is_bbox_in_gt_core) num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0) if num_gts == 0 or num_bboxes == 0: # If no gts exist, assign all pixels to negative assigned_gt_ids = \ is_bbox_in_gt_core.new_zeros((num_bboxes,), dtype=torch.long) pixels_in_gt_shadow = assigned_gt_ids.new_empty((0, 2)) else: # Step 3: assign a one-hot gt id to each pixel, and smaller objects # have high priority to assign the pixel. sort_idx = self.get_gt_priorities(gt_bboxes) assigned_gt_ids, pixels_in_gt_shadow = \ self.assign_one_hot_gt_indices(is_bbox_in_gt_core, is_bbox_in_gt_shadow, gt_priority=sort_idx) if gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0: # No ground truth or boxes, return empty assignment gt_bboxes_ignore = scale_boxes( gt_bboxes_ignore, scale=self.ignore_gt_scale) is_bbox_in_ignored_gts = is_located_in(bbox_centers, gt_bboxes_ignore) is_bbox_in_ignored_gts = is_bbox_in_ignored_gts.any(dim=1) assigned_gt_ids[is_bbox_in_ignored_gts] = -1 # 4. Assign prior bboxes with class label according to its gt id. assigned_labels = None shadowed_pixel_labels = None if gt_labels is not None: # Default assigned label is the background (-1) assigned_labels = assigned_gt_ids.new_full((num_bboxes, ), -1) pos_inds = torch.nonzero( assigned_gt_ids > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[assigned_gt_ids[pos_inds] - 1] # 5. Find pixels lying in the shadow of an object shadowed_pixel_labels = pixels_in_gt_shadow.clone() if pixels_in_gt_shadow.numel() > 0: pixel_idx, gt_idx =\ pixels_in_gt_shadow[:, 0], pixels_in_gt_shadow[:, 1] assert (assigned_gt_ids[pixel_idx] != gt_idx).all(), \ 'Some pixels are dually assigned to ignore and gt!' 
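            # Map each shadowed pair's 1-based gt index to that gt's class
            # label below, so the loss can ignore those anchors for exactly
            # the shadowed class.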
shadowed_pixel_labels[:, 1] = gt_labels[gt_idx - 1] override = ( assigned_labels[pixel_idx] == shadowed_pixel_labels[:, 1]) if self.foreground_dominate: # When a pixel is both positive and shadowed, set it as pos shadowed_pixel_labels = shadowed_pixel_labels[~override] else: # When a pixel is both pos and shadowed, set it as shadowed assigned_labels[pixel_idx[override]] = -1 assigned_gt_ids[pixel_idx[override]] = 0 assign_result = AssignResult( num_gts, assigned_gt_ids, None, labels=assigned_labels) # Add shadowed_labels as assign_result property. Shape: (num_shadow, 2) assign_result.set_extra_property('shadowed_labels', shadowed_pixel_labels) return assign_result def assign_one_hot_gt_indices(self, is_bbox_in_gt_core, is_bbox_in_gt_shadow, gt_priority=None): """Assign only one gt index to each prior box. Gts with large gt_priority are more likely to be assigned. Args: is_bbox_in_gt_core (Tensor): Bool tensor indicating the bbox center is in the core area of a gt (e.g. 0-0.2). Shape: (num_prior, num_gt). is_bbox_in_gt_shadow (Tensor): Bool tensor indicating the bbox center is in the shadowed area of a gt (e.g. 0.2-0.5). Shape: (num_prior, num_gt). gt_priority (Tensor): Priorities of gts. The gt with a higher priority is more likely to be assigned to the bbox when the bbox match with multiple gts. Shape: (num_gt, ). Returns: tuple: Returns (assigned_gt_inds, shadowed_gt_inds). - assigned_gt_inds: The assigned gt index of each prior bbox \ (i.e. index from 1 to num_gts). Shape: (num_prior, ). - shadowed_gt_inds: shadowed gt indices. It is a tensor of \ shape (num_ignore, 2) with first column being the \ shadowed prior bbox indices and the second column the \ shadowed gt indices (1-based). """ num_bboxes, num_gts = is_bbox_in_gt_core.shape if gt_priority is None: gt_priority = torch.arange( num_gts, device=is_bbox_in_gt_core.device) assert gt_priority.size(0) == num_gts # The bigger gt_priority, the more preferable to be assigned # The assigned inds are by default 0 (background) assigned_gt_inds = is_bbox_in_gt_core.new_zeros((num_bboxes, ), dtype=torch.long) # Shadowed bboxes are assigned to be background. But the corresponding # label is ignored during loss calculation, which is done through # shadowed_gt_inds shadowed_gt_inds = torch.nonzero(is_bbox_in_gt_shadow, as_tuple=False) if is_bbox_in_gt_core.sum() == 0: # No gt match shadowed_gt_inds[:, 1] += 1 # 1-based. For consistency issue return assigned_gt_inds, shadowed_gt_inds # The priority of each prior box and gt pair. If one prior box is # matched bo multiple gts. Only the pair with the highest priority # is saved pair_priority = is_bbox_in_gt_core.new_full((num_bboxes, num_gts), -1, dtype=torch.long) # Each bbox could match with multiple gts. # The following codes deal with this situation # Matched bboxes (to any gt). Shape: (num_pos_anchor, ) inds_of_match = torch.any(is_bbox_in_gt_core, dim=1) # The matched gt index of each positive bbox. Length >= num_pos_anchor # , since one bbox could match multiple gts matched_bbox_gt_inds = torch.nonzero( is_bbox_in_gt_core, as_tuple=False)[:, 1] # Assign priority to each bbox-gt pair. pair_priority[is_bbox_in_gt_core] = gt_priority[matched_bbox_gt_inds] _, argmax_priority = pair_priority[inds_of_match].max(dim=1) assigned_gt_inds[inds_of_match] = argmax_priority + 1 # 1-based # Zero-out the assigned anchor box to filter the shadowed gt indices is_bbox_in_gt_core[inds_of_match, argmax_priority] = 0 # Concat the shadowed indices due to overlapping with that out side of # effective scale. 
shape: (total_num_ignore, 2) shadowed_gt_inds = torch.cat( (shadowed_gt_inds, torch.nonzero( is_bbox_in_gt_core, as_tuple=False)), dim=0) # `is_bbox_in_gt_core` should be changed back to keep arguments intact. is_bbox_in_gt_core[inds_of_match, argmax_priority] = 1 # 1-based shadowed gt indices, to be consistent with `assigned_gt_inds` if shadowed_gt_inds.numel() > 0: shadowed_gt_inds[:, 1] += 1 return assigned_gt_inds, shadowed_gt_inds ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/grid_assigner.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_ASSIGNERS from ..iou_calculators import build_iou_calculator from .assign_result import AssignResult from .base_assigner import BaseAssigner @BBOX_ASSIGNERS.register_module() class GridAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposals will be assigned with `-1`, `0`, or a positive integer indicating the ground truth index. - -1: don't care - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt Args: pos_iou_thr (float): IoU threshold for positive bboxes. neg_iou_thr (float or tuple): IoU threshold for negative bboxes. min_pos_iou (float): Minimum iou for a bbox to be considered as a positive bbox. Positive samples can have smaller IoU than pos_iou_thr due to the 4th step (assign max IoU sample to each gt). gt_max_assign_all (bool): Whether to assign all bboxes with the same highest overlap with some gt to that gt. """ def __init__(self, pos_iou_thr, neg_iou_thr, min_pos_iou=.0, gt_max_assign_all=True, iou_calculator=dict(type='BboxOverlaps2D')): self.pos_iou_thr = pos_iou_thr self.neg_iou_thr = neg_iou_thr self.min_pos_iou = min_pos_iou self.gt_max_assign_all = gt_max_assign_all self.iou_calculator = build_iou_calculator(iou_calculator) def assign(self, bboxes, box_responsible_flags, gt_bboxes, gt_labels=None): """Assign gt to bboxes. The process is very much like the max iou assigner, except that positive samples are constrained within the cell that the gt boxes fell in. This method assign a gt bbox to every bbox (proposal/anchor), each bbox will be assigned with -1, 0, or a positive number. -1 means don't care, 0 means negative sample, positive number is the index (1-based) of assigned gt. The assignment is done in following steps, the order matters. 1. assign every bbox to -1 2. assign proposals whose iou with all gts <= neg_iou_thr to 0 3. for each bbox within a cell, if the iou with its nearest gt > pos_iou_thr and the center of that gt falls inside the cell, assign it to that bbox 4. for each gt bbox, assign its nearest proposals within the cell the gt bbox falls in to itself. Args: bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). box_responsible_flags (Tensor): flag to indicate whether box is responsible for prediction, shape(n, ) gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). Returns: :obj:`AssignResult`: The assign result. """ num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0) # compute iou between all gt and bboxes overlaps = self.iou_calculator(gt_bboxes, bboxes) # 1. 
assign -1 by default assigned_gt_inds = overlaps.new_full((num_bboxes, ), -1, dtype=torch.long) if num_gts == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = overlaps.new_zeros((num_bboxes, )) if num_gts == 0: # No truth, assign everything to background assigned_gt_inds[:] = 0 if gt_labels is None: assigned_labels = None else: assigned_labels = overlaps.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels) # 2. assign negative: below # for each anchor, which gt best overlaps with it # for each anchor, the max iou of all gts # shape of max_overlaps == argmax_overlaps == num_bboxes max_overlaps, argmax_overlaps = overlaps.max(dim=0) if isinstance(self.neg_iou_thr, float): assigned_gt_inds[(max_overlaps >= 0) & (max_overlaps <= self.neg_iou_thr)] = 0 elif isinstance(self.neg_iou_thr, (tuple, list)): assert len(self.neg_iou_thr) == 2 assigned_gt_inds[(max_overlaps > self.neg_iou_thr[0]) & (max_overlaps <= self.neg_iou_thr[1])] = 0 # 3. assign positive: falls into responsible cell and above # positive IOU threshold, the order matters. # the prior condition of comparison is to filter out all # unrelated anchors, i.e. not box_responsible_flags overlaps[:, ~box_responsible_flags.type(torch.bool)] = -1. # calculate max_overlaps again, but this time we only consider IOUs # for anchors responsible for prediction max_overlaps, argmax_overlaps = overlaps.max(dim=0) # for each gt, which anchor best overlaps with it # for each gt, the max iou of all proposals # shape of gt_max_overlaps == gt_argmax_overlaps == num_gts gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1) pos_inds = (max_overlaps > self.pos_iou_thr) & box_responsible_flags.type(torch.bool) assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1 # 4. assign positive to max overlapped anchors within responsible cell for i in range(num_gts): if gt_max_overlaps[i] > self.min_pos_iou: if self.gt_max_assign_all: max_iou_inds = (overlaps[i, :] == gt_max_overlaps[i]) & \ box_responsible_flags.type(torch.bool) assigned_gt_inds[max_iou_inds] = i + 1 elif box_responsible_flags[gt_argmax_overlaps[i]]: assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1 # assign labels of positive anchors if gt_labels is not None: assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[ assigned_gt_inds[pos_inds] - 1] else: assigned_labels = None return AssignResult( num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/hungarian_assigner.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from scipy.optimize import linear_sum_assignment from ..builder import BBOX_ASSIGNERS from ..match_costs import build_match_cost from ..transforms import bbox_cxcywh_to_xyxy from .assign_result import AssignResult from .base_assigner import BaseAssigner @BBOX_ASSIGNERS.register_module() class HungarianAssigner(BaseAssigner): """Computes one-to-one matching between predictions and ground truth. This class computes an assignment between the targets and the predictions based on the costs. The costs are weighted sum of three components: classification cost, regression L1 cost and regression iou cost. 
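Concretely, ``cost = cls_cost + reg_cost + iou_cost``, where each term is produced by its configured match-cost module and is already scaled by that module's weight.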
The targets don't include the no_object, so generally there are more predictions than targets. After the one-to-one matching, the un-matched are treated as backgrounds. Thus each query prediction will be assigned with `0` or a positive integer indicating the ground truth index: - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt Args: cls_cost (dict, optional): The config of the classification match cost. Default dict(type='ClassificationCost', weight=1.). reg_cost (dict, optional): The config of the regression L1 match cost. Default dict(type='BBoxL1Cost', weight=1.0). iou_cost (dict, optional): The config of the regression iou match cost; "giou" (generalized intersection over union) is used by default. Default dict(type='IoUCost', iou_mode='giou', weight=1.0). """ def __init__(self, cls_cost=dict(type='ClassificationCost', weight=1.), reg_cost=dict(type='BBoxL1Cost', weight=1.0), iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)): self.cls_cost = build_match_cost(cls_cost) self.reg_cost = build_match_cost(reg_cost) self.iou_cost = build_match_cost(iou_cost) def assign(self, bbox_pred, cls_pred, gt_bboxes, gt_labels, img_meta, gt_bboxes_ignore=None, eps=1e-7): """Computes one-to-one matching based on the weighted costs. This method assigns each query prediction to a ground truth or background. The `assigned_gt_inds` with -1 means don't care, 0 means negative sample, and positive number is the index (1-based) of assigned gt. The assignment is done in the following steps, the order matters. 1. assign every prediction to -1 2. compute the weighted costs 3. do Hungarian matching on CPU based on the costs 4. assign all to 0 (background) first, then for each matched pair between predictions and gts, treat this prediction as foreground and assign the corresponding gt index (plus 1) to it. Args: bbox_pred (Tensor): Predicted boxes with normalized coordinates (cx, cy, w, h), which are all in range [0, 1]. Shape [num_query, 4]. cls_pred (Tensor): Predicted classification logits, shape [num_query, num_class]. gt_bboxes (Tensor): Ground truth boxes with unnormalized coordinates (x1, y1, x2, y2). Shape [num_gt, 4]. gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). img_meta (dict): Meta information for current image. gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`. Default None. eps (int | float, optional): A value added to the denominator for numerical stability. Default 1e-7. Returns: :obj:`AssignResult`: The assigned result. """ assert gt_bboxes_ignore is None, \ 'Only case when gt_bboxes_ignore is None is supported.' num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) # 1. assign -1 by default assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), -1, dtype=torch.long) assigned_labels = bbox_pred.new_full((num_bboxes, ), -1, dtype=torch.long) if num_gts == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment if num_gts == 0: # No ground truth, assign all to background assigned_gt_inds[:] = 0 return AssignResult( num_gts, assigned_gt_inds, None, labels=assigned_labels) img_h, img_w, _ = img_meta['img_shape'] factor = gt_bboxes.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0) # 2. compute the weighted costs # classification and bbox cost. 
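        # (gt boxes are divided by the image size below, so the L1 cost
        # compares boxes in the same normalized (cx, cy, w, h) space as
        # bbox_pred)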
        cls_cost = self.cls_cost(cls_pred, gt_labels)
        # regression L1 cost
        normalize_gt_bboxes = gt_bboxes / factor
        reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes)
        # regression iou cost; GIoU is used by default, as in official DETR.
        bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor
        iou_cost = self.iou_cost(bboxes, gt_bboxes)
        # weighted sum of the above three costs
        cost = cls_cost + reg_cost + iou_cost

        # 3. do Hungarian matching on CPU using linear_sum_assignment
        cost = cost.detach().cpu()
        matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
        matched_row_inds = torch.from_numpy(matched_row_inds).to(
            bbox_pred.device)
        matched_col_inds = torch.from_numpy(matched_col_inds).to(
            bbox_pred.device)

        # 4. assign backgrounds and foregrounds
        # assign all indices to backgrounds first
        assigned_gt_inds[:] = 0
        # assign foregrounds based on matching results
        assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
        return AssignResult(
            num_gts, assigned_gt_inds, None, labels=assigned_labels)
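An illustrative sketch (not part of the repository; the 3x2 cost matrix is made up) of the core of step 3 above: scipy's Hungarian solver on a dense cost matrix, with rows as query predictions and columns as ground truths:

import numpy as np
from scipy.optimize import linear_sum_assignment

cost = np.array([[0.9, 0.1],
                 [0.4, 0.8],
                 [0.2, 0.7]])
rows, cols = linear_sum_assignment(cost)        # minimizes total cost
# rows=[0 2], cols=[1 0]: query 0 -> gt 1, query 2 -> gt 0; query 1 unmatched
assigned_gt_inds = np.zeros(3, dtype=np.int64)  # 0 = background
assigned_gt_inds[rows] = cols + 1               # 1-based gt index
print(assigned_gt_inds)                         # [2 0 1]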
""" assert gt_bboxes_ignore is None, \ 'Only case when gt_bboxes_ignore is None is supported.' # K-Net sometimes passes cls_pred=None to this assigner. # So we should use the shape of mask_pred num_gt, num_query = gt_labels.shape[0], mask_pred.shape[0] # 1. assign -1 by default assigned_gt_inds = mask_pred.new_full((num_query, ), -1, dtype=torch.long) assigned_labels = mask_pred.new_full((num_query, ), -1, dtype=torch.long) if num_gt == 0 or num_query == 0: # No ground truth or boxes, return empty assignment if num_gt == 0: # No ground truth, assign all to background assigned_gt_inds[:] = 0 return AssignResult( num_gt, assigned_gt_inds, None, labels=assigned_labels) # 2. compute the weighted costs # classification and maskcost. if self.cls_cost.weight != 0 and cls_pred is not None: cls_cost = self.cls_cost(cls_pred, gt_labels) else: cls_cost = 0 if self.mask_cost.weight != 0: # mask_pred shape = [num_query, h, w] # gt_mask shape = [num_gt, h, w] # mask_cost shape = [num_query, num_gt] mask_cost = self.mask_cost(mask_pred, gt_mask) else: mask_cost = 0 if self.dice_cost.weight != 0: dice_cost = self.dice_cost(mask_pred, gt_mask) else: dice_cost = 0 cost = cls_cost + mask_cost + dice_cost # 3. do Hungarian matching on CPU using linear_sum_assignment cost = cost.detach().cpu() matched_row_inds, matched_col_inds = linear_sum_assignment(cost) matched_row_inds = torch.from_numpy(matched_row_inds).to( mask_pred.device) matched_col_inds = torch.from_numpy(matched_col_inds).to( mask_pred.device) # 4. assign backgrounds and foregrounds # assign all indices to backgrounds first assigned_gt_inds[:] = 0 # assign foregrounds based on matching results assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] return AssignResult( num_gt, assigned_gt_inds, None, labels=assigned_labels) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/max_iou_assigner.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_ASSIGNERS from ..iou_calculators import build_iou_calculator from .assign_result import AssignResult from .base_assigner import BaseAssigner @BBOX_ASSIGNERS.register_module() class MaxIoUAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposals will be assigned with `-1`, or a semi-positive integer indicating the ground truth index. - -1: negative sample, no assigned gt - semi-positive integer: positive sample, index (0-based) of assigned gt Args: pos_iou_thr (float): IoU threshold for positive bboxes. neg_iou_thr (float or tuple): IoU threshold for negative bboxes. min_pos_iou (float): Minimum iou for a bbox to be considered as a positive bbox. Positive samples can have smaller IoU than pos_iou_thr due to the 4th step (assign max IoU sample to each gt). `min_pos_iou` is set to avoid assigning bboxes that have extremely small iou with GT as positive samples. It brings about 0.3 mAP improvements in 1x schedule but does not affect the performance of 3x schedule. More comparisons can be found in `PR #7464 `_. gt_max_assign_all (bool): Whether to assign all bboxes with the same highest overlap with some gt to that gt. ignore_iof_thr (float): IoF threshold for ignoring bboxes (if `gt_bboxes_ignore` is specified). Negative values mean not ignoring any bboxes. 
================================================
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/max_iou_assigner.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class MaxIoUAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each proposal will be assigned with `-1`, or a semi-positive integer
    indicating the ground truth index.

    - -1: negative sample, no assigned gt
    - semi-positive integer: positive sample, index (0-based) of assigned gt

    Args:
        pos_iou_thr (float): IoU threshold for positive bboxes.
        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
        min_pos_iou (float): Minimum iou for a bbox to be considered as a
            positive bbox. Positive samples can have smaller IoU than
            pos_iou_thr due to the 4th step (assign max IoU sample to each
            gt). `min_pos_iou` is set to avoid assigning bboxes that have
            extremely small iou with GT as positive samples. It brings about
            0.3 mAP improvements in the 1x schedule but does not affect the
            performance of the 3x schedule. More comparisons can be found in
            PR #7464.
        gt_max_assign_all (bool): Whether to assign all bboxes with the same
            highest overlap with some gt to that gt.
        ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
            `gt_bboxes_ignore` is specified). Negative values mean not
            ignoring any bboxes.
        ignore_wrt_candidates (bool): Whether to compute the iof between
            `bboxes` and `gt_bboxes_ignore`, or the contrary.
        match_low_quality (bool): Whether to allow low quality matches. This
            is usually allowed for RPN and single stage detectors, but not
            allowed in the second stage. Details are demonstrated in Step 4.
        gpu_assign_thr (int): The upper bound of the number of GT for GPU
            assign. When the number of gt is above this threshold, will
            assign on CPU device. Negative values mean not assign on CPU.
    """

    def __init__(self,
                 pos_iou_thr,
                 neg_iou_thr,
                 min_pos_iou=.0,
                 gt_max_assign_all=True,
                 ignore_iof_thr=-1,
                 ignore_wrt_candidates=True,
                 match_low_quality=True,
                 gpu_assign_thr=-1,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        self.pos_iou_thr = pos_iou_thr
        self.neg_iou_thr = neg_iou_thr
        self.min_pos_iou = min_pos_iou
        self.gt_max_assign_all = gt_max_assign_all
        self.ignore_iof_thr = ignore_iof_thr
        self.ignore_wrt_candidates = ignore_wrt_candidates
        self.gpu_assign_thr = gpu_assign_thr
        self.match_low_quality = match_low_quality
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        """Assign gt to bboxes.

        This method assigns a gt bbox to every bbox (proposal/anchor); each
        bbox will be assigned with -1, or a semi-positive number. -1 means
        negative sample, a semi-positive number is the index (0-based) of the
        assigned gt.
        The assignment is done in the following steps; the order matters.

        1. assign every bbox to the background
        2. assign proposals whose iou with all gts < neg_iou_thr to 0
        3. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
           assign it to that gt
        4. for each gt bbox, assign its nearest proposals (may be more than
           one) to itself

        Args:
            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
        Example:
            >>> self = MaxIoUAssigner(0.5, 0.5)
            >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
            >>> gt_bboxes = torch.Tensor([[0, 0, 10, 9]])
            >>> assign_result = self.assign(bboxes, gt_bboxes)
            >>> expected_gt_inds = torch.LongTensor([1, 0])
            >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
        """
        assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
            gt_bboxes.shape[0] > self.gpu_assign_thr) else False
        # compute overlap and assign gt on CPU when number of GT is large
        if assign_on_cpu:
            device = bboxes.device
            bboxes = bboxes.cpu()
            gt_bboxes = gt_bboxes.cpu()
            if gt_bboxes_ignore is not None:
                gt_bboxes_ignore = gt_bboxes_ignore.cpu()
            if gt_labels is not None:
                gt_labels = gt_labels.cpu()

        overlaps = self.iou_calculator(gt_bboxes, bboxes)

        if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
                and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
            if self.ignore_wrt_candidates:
                ignore_overlaps = self.iou_calculator(
                    bboxes, gt_bboxes_ignore, mode='iof')
                ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
            else:
                ignore_overlaps = self.iou_calculator(
                    gt_bboxes_ignore, bboxes, mode='iof')
                ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
            overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1

        assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
        if assign_on_cpu:
            assign_result.gt_inds = assign_result.gt_inds.to(device)
            assign_result.max_overlaps = assign_result.max_overlaps.to(device)
            if assign_result.labels is not None:
                assign_result.labels = assign_result.labels.to(device)
        return assign_result
    def assign_wrt_overlaps(self, overlaps, gt_labels=None):
        """Assign w.r.t. the overlaps of bboxes with gts.

        Args:
            overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
                shape(k, n).
            gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)

        # 1. assign -1 by default
        assigned_gt_inds = overlaps.new_full((num_bboxes, ),
                                             -1,
                                             dtype=torch.long)

        if num_gts == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            max_overlaps = overlaps.new_zeros((num_bboxes, ))
            if num_gts == 0:
                # No truth, assign everything to background
                assigned_gt_inds[:] = 0
            if gt_labels is None:
                assigned_labels = None
            else:
                assigned_labels = overlaps.new_full((num_bboxes, ),
                                                    -1,
                                                    dtype=torch.long)
            return AssignResult(
                num_gts,
                assigned_gt_inds,
                max_overlaps,
                labels=assigned_labels)

        # for each anchor, which gt best overlaps with it
        # for each anchor, the max iou of all gts
        max_overlaps, argmax_overlaps = overlaps.max(dim=0)
        # for each gt, which anchor best overlaps with it
        # for each gt, the max iou of all proposals
        gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)

        # 2. assign negative: below
        # the negative inds are set to be 0
        if isinstance(self.neg_iou_thr, float):
            assigned_gt_inds[(max_overlaps >= 0)
                             & (max_overlaps < self.neg_iou_thr)] = 0
        elif isinstance(self.neg_iou_thr, tuple):
            assert len(self.neg_iou_thr) == 2
            assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])
                             & (max_overlaps < self.neg_iou_thr[1])] = 0

        # 3. assign positive: above positive IoU threshold
        pos_inds = max_overlaps >= self.pos_iou_thr
        assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1

        if self.match_low_quality:
            # Low-quality matching will overwrite the assigned_gt_inds
            # assigned in Step 3. Thus, the assigned gt might not be the
            # best one for prediction.
            # For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2,
            # GT bbox 1 will be assigned as the best target for bbox A in
            # step 3. However, if GT bbox 2's gt_argmax_overlaps = A, bbox
            # A's assigned_gt_inds will be overwritten to be GT bbox 2.
            # This might be the reason that it is not used in ROI Heads.
            for i in range(num_gts):
                if gt_max_overlaps[i] >= self.min_pos_iou:
                    if self.gt_max_assign_all:
                        max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]
                        assigned_gt_inds[max_iou_inds] = i + 1
                    else:
                        assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1

        if gt_labels is not None:
            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
            pos_inds = torch.nonzero(
                assigned_gt_inds > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = gt_labels[
                    assigned_gt_inds[pos_inds] - 1]
        else:
            assigned_labels = None

        return AssignResult(
            num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
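An illustrative sketch (not part of the repository; all IoU values and thresholds are made up) of how the low-quality match in step 4 can overwrite step 3. Anchor 0 overlaps gt 0 best (0.9) and is assigned to it in step 3, but it is also gt 1's best anchor (0.6), so step 4 re-assigns it:

import torch

overlaps = torch.tensor([[0.9, 0.5, 0.1],    # gt 0 vs anchors 0..2
                         [0.6, 0.2, 0.1]])   # gt 1 vs anchors 0..2
pos_iou_thr, min_pos_iou = 0.5, 0.3
assigned = torch.full((3, ), -1, dtype=torch.long)

max_ov, argmax_ov = overlaps.max(dim=0)
pos = max_ov >= pos_iou_thr
assigned[pos] = argmax_ov[pos] + 1           # step 3: anchors 0, 1 -> gt 0

gt_max, gt_argmax = overlaps.max(dim=1)
for i in range(2):                           # step 4: low-quality matching
    if gt_max[i] >= min_pos_iou:
        assigned[gt_argmax[i]] = i + 1
print(assigned)  # tensor([ 2,  1, -1]): anchor 0 was overwritten to gt 1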
================================================
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/point_assigner.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from ..builder import BBOX_ASSIGNERS
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class PointAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each point.

    Each point will be assigned with `0`, or a positive integer indicating
    the ground truth index.

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt
    """

    def __init__(self, scale=4, pos_num=3):
        self.scale = scale
        self.pos_num = pos_num

    def assign(self, points, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        """Assign gt to points.

        This method assigns a gt bbox to every points set; each points set
        will be assigned with `0`, or a positive number. 0 means negative
        sample (background), a positive number is the index (1-based) of
        the assigned gt.
        The assignment is done in the following steps; the order matters.

        1. assign every point to the background (0)
        2. A point is assigned to some gt bbox if
            (i) the point is within the k closest points to the gt bbox
            (ii) the distance between this point and the gt is smaller than
                other gt bboxes

        Args:
            points (Tensor): points to be assigned, shape(n, 3) while last
                dimension stands for (x, y, stride).
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
                NOTE: currently unused.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        num_points = points.shape[0]
        num_gts = gt_bboxes.shape[0]

        if num_gts == 0 or num_points == 0:
            # If no truth assign everything to the background
            assigned_gt_inds = points.new_full((num_points, ),
                                               0,
                                               dtype=torch.long)
            if gt_labels is None:
                assigned_labels = None
            else:
                assigned_labels = points.new_full((num_points, ),
                                                  -1,
                                                  dtype=torch.long)
            return AssignResult(
                num_gts, assigned_gt_inds, None, labels=assigned_labels)

        points_xy = points[:, :2]
        points_stride = points[:, 2]
        points_lvl = torch.log2(
            points_stride).int()  # [3...,4...,5...,6...,7...]
        lvl_min, lvl_max = points_lvl.min(), points_lvl.max()

        # assign gt box
        gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2
        gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6)
        scale = self.scale
        gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) +
                          torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int()
        gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max)

        # stores the assigned gt index of each point
        assigned_gt_inds = points.new_zeros((num_points, ), dtype=torch.long)
        # stores the assigned gt dist (to this point) of each point
        assigned_gt_dist = points.new_full((num_points, ), float('inf'))
        points_range = torch.arange(points.shape[0])

        for idx in range(num_gts):
            gt_lvl = gt_bboxes_lvl[idx]
            # get the index of points in this level
            lvl_idx = gt_lvl == points_lvl
            points_index = points_range[lvl_idx]
            # get the points in this level
            lvl_points = points_xy[lvl_idx, :]
            # get the center point of gt
            gt_point = gt_bboxes_xy[[idx], :]
            # get width and height of gt
            gt_wh = gt_bboxes_wh[[idx], :]
            # compute the distance between gt center and
            #   all points in this level
            points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1)
            # find the nearest k points to gt center in this level
            min_dist, min_dist_index = torch.topk(
                points_gt_dist, self.pos_num, largest=False)
            # the index of nearest k points to gt center in this level
            min_dist_points_index = points_index[min_dist_index]
            # The less_than_recorded_index stores the index
            #   of min_dist that is less than the assigned_gt_dist. Where
            #   assigned_gt_dist stores the dist from previous assigned gt
            #   (if exist) to each point.
            less_than_recorded_index = min_dist < assigned_gt_dist[
                min_dist_points_index]
            # The min_dist_points_index stores the index of points satisfying:
            #   (1) it is k nearest to current gt center in this level.
            #   (2) it is closer to current gt center than other gt centers.
            min_dist_points_index = min_dist_points_index[
                less_than_recorded_index]
            # assign the result
            assigned_gt_inds[min_dist_points_index] = idx + 1
            assigned_gt_dist[min_dist_points_index] = min_dist[
                less_than_recorded_index]

        if gt_labels is not None:
            assigned_labels = assigned_gt_inds.new_full((num_points, ), -1)
            pos_inds = torch.nonzero(
                assigned_gt_inds > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = gt_labels[
                    assigned_gt_inds[pos_inds] - 1]
        else:
            assigned_labels = None

        return AssignResult(
            num_gts, assigned_gt_inds, None, labels=assigned_labels)
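An illustrative sketch (not part of the repository; the gt box and scale are made up) of the level-selection arithmetic above: a gt is matched to the pyramid level whose stride best fits its size divided by `scale`:

import torch

gt = torch.tensor([10., 20., 74., 148.])            # x1, y1, x2, y2
wh = (gt[2:] - gt[:2]).clamp(min=1e-6)              # w=64, h=128
scale = 4
lvl = ((torch.log2(wh[0] / scale) + torch.log2(wh[1] / scale)) / 2).int()
print(lvl)  # tensor(4, dtype=torch.int32): log2(16)=4, log2(32)=5 -> 4.5 -> 4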
================================================
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/region_assigner.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from mmdet.core import anchor_inside_flags
from ..builder import BBOX_ASSIGNERS
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


def calc_region(bbox, ratio, stride, featmap_size=None):
    """Calculate the region of the box defined by the ratio; the ratio is
    measured from the center of the box to every edge."""
    # project bbox on the feature
    f_bbox = bbox / stride
    x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2])
    y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3])
    x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2])
    y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3])
    if featmap_size is not None:
        x1 = x1.clamp(min=0, max=featmap_size[1])
        y1 = y1.clamp(min=0, max=featmap_size[0])
        x2 = x2.clamp(min=0, max=featmap_size[1])
        y2 = y2.clamp(min=0, max=featmap_size[0])
    return (x1, y1, x2, y2)


def anchor_ctr_inside_region_flags(anchors, stride, region):
    """Get flags indicating whether anchor centers are inside the region."""
    x1, y1, x2, y2 = region
    f_anchors = anchors / stride
    x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5
    y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5
    flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2)
    return flags


@BBOX_ASSIGNERS.register_module()
class RegionAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each proposal will be assigned with `-1`, `0`, or a positive integer
    indicating the ground truth index.

    - -1: don't care
    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        center_ratio: ratio of the region in the center of the bbox to
            define positive sample.
        ignore_ratio: ratio of the region to define ignore samples.
    """

    def __init__(self, center_ratio=0.2, ignore_ratio=0.5):
        self.center_ratio = center_ratio
        self.ignore_ratio = ignore_ratio

    def assign(self,
               mlvl_anchors,
               mlvl_valid_flags,
               gt_bboxes,
               img_meta,
               featmap_sizes,
               anchor_scale,
               anchor_strides,
               gt_bboxes_ignore=None,
               gt_labels=None,
               allowed_border=0):
        """Assign gt to anchors.

        This method assigns a gt bbox to every bbox (proposal/anchor); each
        bbox will be assigned with -1, 0, or a positive number. -1 means
        don't care, 0 means negative sample, a positive number is the index
        (1-based) of the assigned gt.

        The assignment is done in the following steps, and the order matters.

        1. Assign every anchor to 0 (negative)
        2. (For each gt_bboxes) Compute ignore flags based on ignore_region
           then assign -1 to anchors w.r.t. ignore flags
        3. (For each gt_bboxes) Compute pos flags based on center_region
           then assign gt_bboxes to anchors w.r.t. pos flags
        4. (For each gt_bboxes) Compute ignore flags based on adjacent anchor
           level then assign -1 to anchors w.r.t. ignore flags
        5. Assign anchor outside of image to -1

        Args:
            mlvl_anchors (list[Tensor]): Multi level anchors.
            mlvl_valid_flags (list[Tensor]): Multi level valid flags.
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            img_meta (dict): Meta info of image.
            featmap_sizes (list[Tensor]): Feature map size of each level.
            anchor_scale (int): Scale of the anchor.
            anchor_strides (list[int]): Stride of the anchor.
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
            allowed_border (int, optional): The border to allow the valid
                anchor. Defaults to 0.

        Returns:
            :obj:`AssignResult`: The assign result.
""" if gt_bboxes_ignore is not None: raise NotImplementedError num_gts = gt_bboxes.shape[0] num_bboxes = sum(x.shape[0] for x in mlvl_anchors) if num_gts == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = gt_bboxes.new_zeros((num_bboxes, )) assigned_gt_inds = gt_bboxes.new_zeros((num_bboxes, ), dtype=torch.long) if gt_labels is None: assigned_labels = None else: assigned_labels = gt_bboxes.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels) num_lvls = len(mlvl_anchors) r1 = (1 - self.center_ratio) / 2 r2 = (1 - self.ignore_ratio) / 2 scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (gt_bboxes[:, 3] - gt_bboxes[:, 1])) min_anchor_size = scale.new_full( (1, ), float(anchor_scale * anchor_strides[0])) target_lvls = torch.floor( torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() # 1. assign 0 (negative) by default mlvl_assigned_gt_inds = [] mlvl_ignore_flags = [] for lvl in range(num_lvls): h, w = featmap_sizes[lvl] assert h * w == mlvl_anchors[lvl].shape[0] assigned_gt_inds = gt_bboxes.new_full((h * w, ), 0, dtype=torch.long) ignore_flags = torch.zeros_like(assigned_gt_inds) mlvl_assigned_gt_inds.append(assigned_gt_inds) mlvl_ignore_flags.append(ignore_flags) for gt_id in range(num_gts): lvl = target_lvls[gt_id].item() featmap_size = featmap_sizes[lvl] stride = anchor_strides[lvl] anchors = mlvl_anchors[lvl] gt_bbox = gt_bboxes[gt_id, :4] # Compute regions ignore_region = calc_region(gt_bbox, r2, stride, featmap_size) ctr_region = calc_region(gt_bbox, r1, stride, featmap_size) # 2. Assign -1 to ignore flags ignore_flags = anchor_ctr_inside_region_flags( anchors, stride, ignore_region) mlvl_assigned_gt_inds[lvl][ignore_flags] = -1 # 3. Assign gt_bboxes to pos flags pos_flags = anchor_ctr_inside_region_flags(anchors, stride, ctr_region) mlvl_assigned_gt_inds[lvl][pos_flags] = gt_id + 1 # 4. Assign -1 to ignore adjacent lvl if lvl > 0: d_lvl = lvl - 1 d_anchors = mlvl_anchors[d_lvl] d_featmap_size = featmap_sizes[d_lvl] d_stride = anchor_strides[d_lvl] d_ignore_region = calc_region(gt_bbox, r2, d_stride, d_featmap_size) ignore_flags = anchor_ctr_inside_region_flags( d_anchors, d_stride, d_ignore_region) mlvl_ignore_flags[d_lvl][ignore_flags] = 1 if lvl < num_lvls - 1: u_lvl = lvl + 1 u_anchors = mlvl_anchors[u_lvl] u_featmap_size = featmap_sizes[u_lvl] u_stride = anchor_strides[u_lvl] u_ignore_region = calc_region(gt_bbox, r2, u_stride, u_featmap_size) ignore_flags = anchor_ctr_inside_region_flags( u_anchors, u_stride, u_ignore_region) mlvl_ignore_flags[u_lvl][ignore_flags] = 1 # 4. (cont.) Assign -1 to ignore adjacent lvl for lvl in range(num_lvls): ignore_flags = mlvl_ignore_flags[lvl] mlvl_assigned_gt_inds[lvl][ignore_flags] = -1 # 5. 
================================================
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/sim_ota_assigner.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import torch
import torch.nn.functional as F

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import bbox_overlaps
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class SimOTAAssigner(BaseAssigner):
    """Computes matching between predictions and ground truth.

    Args:
        center_radius (int | float, optional): Ground truth center size
            to judge whether a prior is in center. Default 2.5.
        candidate_topk (int, optional): The candidate top-k which is used to
            get top-k ious to calculate dynamic-k. Default 10.
        iou_weight (int | float, optional): The scale factor for regression
            iou cost. Default 3.0.
        cls_weight (int | float, optional): The scale factor for
            classification cost. Default 1.0.
    """

    def __init__(self,
                 center_radius=2.5,
                 candidate_topk=10,
                 iou_weight=3.0,
                 cls_weight=1.0):
        self.center_radius = center_radius
        self.candidate_topk = candidate_topk
        self.iou_weight = iou_weight
        self.cls_weight = cls_weight

    def assign(self,
               pred_scores,
               priors,
               decoded_bboxes,
               gt_bboxes,
               gt_labels,
               gt_bboxes_ignore=None,
               eps=1e-7):
        """Assign gt to priors using SimOTA. It will switch to CPU mode when
        GPU is out of memory.

        Args:
            pred_scores (Tensor): Classification scores of one image,
                a 2D-Tensor with shape [num_priors, num_classes]
            priors (Tensor): All priors of one image, a 2D-Tensor with shape
                [num_priors, 4] in [cx, cy, stride_w, stride_h] format.
            decoded_bboxes (Tensor): Predicted bboxes, a 2D-Tensor with shape
                [num_priors, 4] in [tl_x, tl_y, br_x, br_y] format.
            gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor
                with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (Tensor): Ground truth labels of one image, a Tensor
                with shape [num_gts].
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
            eps (float): A value added to the denominator for numerical
                stability. Default 1e-7.

        Returns:
            assign_result (obj:`AssignResult`): The assigned result.
        """
        try:
            assign_result = self._assign(pred_scores, priors, decoded_bboxes,
                                         gt_bboxes, gt_labels,
                                         gt_bboxes_ignore, eps)
            return assign_result
        except RuntimeError:
            origin_device = pred_scores.device
            warnings.warn('OOM RuntimeError is raised due to the huge memory '
                          'cost during label assignment. CPU mode is applied '
                          'in this batch. If you want to avoid this issue, '
                          'try to reduce the batch size or image size.')
            torch.cuda.empty_cache()

            pred_scores = pred_scores.cpu()
            priors = priors.cpu()
            decoded_bboxes = decoded_bboxes.cpu()
            gt_bboxes = gt_bboxes.cpu().float()
            gt_labels = gt_labels.cpu()

            assign_result = self._assign(pred_scores, priors, decoded_bboxes,
                                         gt_bboxes, gt_labels,
                                         gt_bboxes_ignore, eps)
            assign_result.gt_inds = assign_result.gt_inds.to(origin_device)
            assign_result.max_overlaps = assign_result.max_overlaps.to(
                origin_device)
            assign_result.labels = assign_result.labels.to(origin_device)
            return assign_result
    def _assign(self,
                pred_scores,
                priors,
                decoded_bboxes,
                gt_bboxes,
                gt_labels,
                gt_bboxes_ignore=None,
                eps=1e-7):
        """Assign gt to priors using SimOTA.

        Args:
            pred_scores (Tensor): Classification scores of one image,
                a 2D-Tensor with shape [num_priors, num_classes]
            priors (Tensor): All priors of one image, a 2D-Tensor with shape
                [num_priors, 4] in [cx, cy, stride_w, stride_h] format.
            decoded_bboxes (Tensor): Predicted bboxes, a 2D-Tensor with shape
                [num_priors, 4] in [tl_x, tl_y, br_x, br_y] format.
            gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor
                with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (Tensor): Ground truth labels of one image, a Tensor
                with shape [num_gts].
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
            eps (float): A value added to the denominator for numerical
                stability. Default 1e-7.

        Returns:
            :obj:`AssignResult`: The assigned result.
        """
        INF = 100000.0
        num_gt = gt_bboxes.size(0)
        num_bboxes = decoded_bboxes.size(0)

        # assign 0 by default
        assigned_gt_inds = decoded_bboxes.new_full((num_bboxes, ),
                                                   0,
                                                   dtype=torch.long)
        valid_mask, is_in_boxes_and_center = self.get_in_gt_and_in_center_info(
            priors, gt_bboxes)
        valid_decoded_bbox = decoded_bboxes[valid_mask]
        valid_pred_scores = pred_scores[valid_mask]
        num_valid = valid_decoded_bbox.size(0)

        if num_gt == 0 or num_bboxes == 0 or num_valid == 0:
            # No ground truth or boxes, return empty assignment
            max_overlaps = decoded_bboxes.new_zeros((num_bboxes, ))
            if num_gt == 0:
                # No truth, assign everything to background
                assigned_gt_inds[:] = 0
            if gt_labels is None:
                assigned_labels = None
            else:
                assigned_labels = decoded_bboxes.new_full((num_bboxes, ),
                                                          -1,
                                                          dtype=torch.long)
            return AssignResult(
                num_gt,
                assigned_gt_inds,
                max_overlaps,
                labels=assigned_labels)

        pairwise_ious = bbox_overlaps(valid_decoded_bbox, gt_bboxes)
        iou_cost = -torch.log(pairwise_ious + eps)

        gt_onehot_label = (
            F.one_hot(gt_labels.to(torch.int64),
                      pred_scores.shape[-1]).float().unsqueeze(0).repeat(
                          num_valid, 1, 1))

        valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat(1, num_gt, 1)
        cls_cost = (
            F.binary_cross_entropy(
                valid_pred_scores.to(dtype=torch.float32).sqrt_(),
                gt_onehot_label,
                reduction='none',
            ).sum(-1).to(dtype=valid_pred_scores.dtype))

        cost_matrix = (
            cls_cost * self.cls_weight + iou_cost * self.iou_weight +
            (~is_in_boxes_and_center) * INF)

        matched_pred_ious, matched_gt_inds = \
            self.dynamic_k_matching(
                cost_matrix, pairwise_ious, num_gt, valid_mask)

        # convert to AssignResult format
        assigned_gt_inds[valid_mask] = matched_gt_inds + 1
        assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
        assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long()
        max_overlaps = assigned_gt_inds.new_full((num_bboxes, ),
                                                 -INF,
                                                 dtype=torch.float32)
        max_overlaps[valid_mask] = matched_pred_ious
        return AssignResult(
            num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
    def get_in_gt_and_in_center_info(self, priors, gt_bboxes):
        num_gt = gt_bboxes.size(0)

        repeated_x = priors[:, 0].unsqueeze(1).repeat(1, num_gt)
        repeated_y = priors[:, 1].unsqueeze(1).repeat(1, num_gt)
        repeated_stride_x = priors[:, 2].unsqueeze(1).repeat(1, num_gt)
        repeated_stride_y = priors[:, 3].unsqueeze(1).repeat(1, num_gt)

        # whether prior centers are in gt bboxes, shape: [n_prior, n_gt]
        l_ = repeated_x - gt_bboxes[:, 0]
        t_ = repeated_y - gt_bboxes[:, 1]
        r_ = gt_bboxes[:, 2] - repeated_x
        b_ = gt_bboxes[:, 3] - repeated_y

        deltas = torch.stack([l_, t_, r_, b_], dim=1)
        is_in_gts = deltas.min(dim=1).values > 0
        is_in_gts_all = is_in_gts.sum(dim=1) > 0

        # whether prior centers are in gt centers
        gt_cxs = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
        gt_cys = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
        ct_box_l = gt_cxs - self.center_radius * repeated_stride_x
        ct_box_t = gt_cys - self.center_radius * repeated_stride_y
        ct_box_r = gt_cxs + self.center_radius * repeated_stride_x
        ct_box_b = gt_cys + self.center_radius * repeated_stride_y

        cl_ = repeated_x - ct_box_l
        ct_ = repeated_y - ct_box_t
        cr_ = ct_box_r - repeated_x
        cb_ = ct_box_b - repeated_y

        ct_deltas = torch.stack([cl_, ct_, cr_, cb_], dim=1)
        is_in_cts = ct_deltas.min(dim=1).values > 0
        is_in_cts_all = is_in_cts.sum(dim=1) > 0

        # in boxes or in centers, shape: [num_priors]
        is_in_gts_or_centers = is_in_gts_all | is_in_cts_all

        # both in boxes and centers, shape: [num_fg, num_gt]
        is_in_boxes_and_centers = (
            is_in_gts[is_in_gts_or_centers, :]
            & is_in_cts[is_in_gts_or_centers, :])
        return is_in_gts_or_centers, is_in_boxes_and_centers

    def dynamic_k_matching(self, cost, pairwise_ious, num_gt, valid_mask):
        matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)
        # select candidate topk ious for dynamic-k calculation
        candidate_topk = min(self.candidate_topk, pairwise_ious.size(0))
        topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0)
        # calculate dynamic k for each gt
        dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)
        for gt_idx in range(num_gt):
            _, pos_idx = torch.topk(
                cost[:, gt_idx], k=dynamic_ks[gt_idx], largest=False)
            matching_matrix[:, gt_idx][pos_idx] = 1

        del topk_ious, dynamic_ks, pos_idx

        prior_match_gt_mask = matching_matrix.sum(1) > 1
        if prior_match_gt_mask.sum() > 0:
            cost_min, cost_argmin = torch.min(
                cost[prior_match_gt_mask, :], dim=1)
            matching_matrix[prior_match_gt_mask, :] *= 0
            matching_matrix[prior_match_gt_mask, cost_argmin] = 1
        # get foreground mask inside box and center prior
        fg_mask_inboxes = matching_matrix.sum(1) > 0
        valid_mask[valid_mask.clone()] = fg_mask_inboxes

        matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1)
        matched_pred_ious = (matching_matrix *
                             pairwise_ious).sum(1)[fg_mask_inboxes]
        return matched_pred_ious, matched_gt_inds
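An illustrative sketch (not part of the repository; the IoU values are made up) of the dynamic-k rule in dynamic_k_matching above: each gt gets k = clamp(sum of its top candidate IoUs, min=1) positive priors:

import torch

pairwise_ious = torch.tensor([[0.6, 0.1],
                              [0.5, 0.2],
                              [0.4, 0.1],
                              [0.1, 0.0]])          # (num_prior=4, num_gt=2)
candidate_topk = min(10, pairwise_ious.size(0))
topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0)
dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)
print(dynamic_ks)  # tensor([1, 1], dtype=torch.int32): sums 1.6 and 0.4 -> k=1 each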
================================================
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/task_aligned_assigner.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner

INF = 100000000


@BBOX_ASSIGNERS.register_module()
class TaskAlignedAssigner(BaseAssigner):
    """Task aligned assigner used in the paper
    "TOOD: Task-aligned One-stage Object Detection".

    Assign a corresponding gt bbox or background to each predicted bbox.
    Each bbox will be assigned with `0` or a positive integer indicating
    the ground truth index.

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        topk (int): number of bboxes selected in each level
        iou_calculator (dict): Config dict for iou calculator.
            Default: dict(type='BboxOverlaps2D')
    """

    def __init__(self, topk, iou_calculator=dict(type='BboxOverlaps2D')):
        assert topk >= 1
        self.topk = topk
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def assign(self,
               pred_scores,
               decode_bboxes,
               anchors,
               gt_bboxes,
               gt_bboxes_ignore=None,
               gt_labels=None,
               alpha=1,
               beta=6):
        """Assign gt to bboxes.

        The assignment is done in the following steps

        1. compute alignment metric between all bboxes (bboxes of all
           pyramid levels) and gt
        2. select top-k bboxes as candidates for each gt
        3. limit the positive sample's center in gt (because the anchor-free
           detector can only predict positive distance)

        Args:
            pred_scores (Tensor): predicted class probability,
                shape(n, num_classes)
            decode_bboxes (Tensor): predicted bounding boxes, shape(n, 4)
            anchors (Tensor): pre-defined anchors, shape(n, 4).
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        anchors = anchors[:, :4]
        num_gt, num_bboxes = gt_bboxes.size(0), anchors.size(0)
        # compute alignment metric between all bbox and gt
        overlaps = self.iou_calculator(decode_bboxes, gt_bboxes).detach()
        bbox_scores = pred_scores[:, gt_labels].detach()
        # assign 0 by default
        assigned_gt_inds = anchors.new_full((num_bboxes, ),
                                            0,
                                            dtype=torch.long)
        assign_metrics = anchors.new_zeros((num_bboxes, ))

        if num_gt == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            max_overlaps = anchors.new_zeros((num_bboxes, ))
            if num_gt == 0:
                # No gt boxes, assign everything to background
                assigned_gt_inds[:] = 0
            if gt_labels is None:
                assigned_labels = None
            else:
                assigned_labels = anchors.new_full((num_bboxes, ),
                                                   -1,
                                                   dtype=torch.long)
            assign_result = AssignResult(
                num_gt,
                assigned_gt_inds,
                max_overlaps,
                labels=assigned_labels)
            assign_result.assign_metrics = assign_metrics
            return assign_result

        # select top-k bboxes as candidates for each gt
        alignment_metrics = bbox_scores**alpha * overlaps**beta
        topk = min(self.topk, alignment_metrics.size(0))
        _, candidate_idxs = alignment_metrics.topk(topk, dim=0, largest=True)
        candidate_metrics = alignment_metrics[candidate_idxs,
                                              torch.arange(num_gt)]
        is_pos = candidate_metrics > 0

        # limit the positive sample's center in gt
        anchors_cx = (anchors[:, 0] + anchors[:, 2]) / 2.0
        anchors_cy = (anchors[:, 1] + anchors[:, 3]) / 2.0
        for gt_idx in range(num_gt):
            candidate_idxs[:, gt_idx] += gt_idx * num_bboxes
        ep_anchors_cx = anchors_cx.view(1, -1).expand(
            num_gt, num_bboxes).contiguous().view(-1)
        ep_anchors_cy = anchors_cy.view(1, -1).expand(
            num_gt, num_bboxes).contiguous().view(-1)
        candidate_idxs = candidate_idxs.view(-1)

        # calculate the left, top, right, bottom distance between positive
        # bbox center and gt side
        l_ = ep_anchors_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]
        t_ = ep_anchors_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]
        r_ = gt_bboxes[:, 2] - ep_anchors_cx[candidate_idxs].view(-1, num_gt)
        b_ = gt_bboxes[:, 3] - ep_anchors_cy[candidate_idxs].view(-1, num_gt)
        is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01
        is_pos = is_pos & is_in_gts

        # if an anchor box is assigned to multiple gts,
        # the one with the highest iou will be selected.
        overlaps_inf = torch.full_like(overlaps,
                                       -INF).t().contiguous().view(-1)
        index = candidate_idxs.view(-1)[is_pos.view(-1)]
        overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]
        overlaps_inf = overlaps_inf.view(num_gt, -1).t()

        max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)
        assigned_gt_inds[
            max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1
        assign_metrics[max_overlaps != -INF] = alignment_metrics[
            max_overlaps != -INF, argmax_overlaps[max_overlaps != -INF]]

        if gt_labels is not None:
            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
            pos_inds = torch.nonzero(
                assigned_gt_inds > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = gt_labels[
                    assigned_gt_inds[pos_inds] - 1]
        else:
            assigned_labels = None
        assign_result = AssignResult(
            num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
        assign_result.assign_metrics = assign_metrics
        return assign_result
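An illustrative sketch (not part of the repository; the score/IoU pairs are made up) of the alignment metric t = s**alpha * u**beta used above, with the default alpha=1, beta=6. High IoU dominates because of the large beta:

import torch

scores = torch.tensor([0.9, 0.6])     # classification score s per candidate
ious = torch.tensor([0.5, 0.8])       # IoU u with the gt per candidate
alpha, beta = 1, 6
metric = scores**alpha * ious**beta
print(metric)  # tensor([0.0141, 0.1573]): the lower-scoring, higher-IoU box wins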
================================================
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/uniform_assigner.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from ..transforms import bbox_xyxy_to_cxcywh
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class UniformAssigner(BaseAssigner):
    """Uniform Matching between the anchors and gt boxes, which can achieve
    balance in positive anchors. `gt_bboxes_ignore` is not considered for
    now.

    Args:
        pos_ignore_thr (float): the threshold to ignore positive anchors
        neg_ignore_thr (float): the threshold to ignore negative anchors
        match_times (int): Number of positive anchors for each gt box.
            Default 4.
        iou_calculator (dict): iou_calculator config
    """

    def __init__(self,
                 pos_ignore_thr,
                 neg_ignore_thr,
                 match_times=4,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        self.match_times = match_times
        self.pos_ignore_thr = pos_ignore_thr
        self.neg_ignore_thr = neg_ignore_thr
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def assign(self,
               bbox_pred,
               anchor,
               gt_bboxes,
               gt_bboxes_ignore=None,
               gt_labels=None):
        num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)

        # 1. assign 0 by default
        assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),
                                              0,
                                              dtype=torch.long)
        assigned_labels = bbox_pred.new_full((num_bboxes, ),
                                             -1,
                                             dtype=torch.long)
        if num_gts == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            if num_gts == 0:
                # No ground truth, assign all to background
                assigned_gt_inds[:] = 0
            assign_result = AssignResult(
                num_gts, assigned_gt_inds, None, labels=assigned_labels)
            assign_result.set_extra_property(
                'pos_idx', bbox_pred.new_empty(0, dtype=torch.bool))
            assign_result.set_extra_property('pos_predicted_boxes',
                                             bbox_pred.new_empty((0, 4)))
            assign_result.set_extra_property('target_boxes',
                                             bbox_pred.new_empty((0, 4)))
            return assign_result

        # 2. Compute the L1 cost between boxes
        # Note that we use both anchors and predicted boxes
        cost_bbox = torch.cdist(
            bbox_xyxy_to_cxcywh(bbox_pred),
            bbox_xyxy_to_cxcywh(gt_bboxes),
            p=1)
        cost_bbox_anchors = torch.cdist(
            bbox_xyxy_to_cxcywh(anchor), bbox_xyxy_to_cxcywh(gt_bboxes), p=1)

        # We found that topk function has different results in cpu and
        # cuda mode. In order to ensure consistency with the source code,
        # we also use cpu mode.
        # TODO: Check whether the performance of cpu and cuda are the same.
        C = cost_bbox.cpu()
        C1 = cost_bbox_anchors.cpu()

        # self.match_times x n
        index = torch.topk(
            C,  # c=b,n,x c[i]=n,x
            k=self.match_times,
            dim=0,
            largest=False)[1]

        # self.match_times x n
        index1 = torch.topk(C1, k=self.match_times, dim=0, largest=False)[1]
        # (self.match_times*2) x n
        indexes = torch.cat((index, index1),
                            dim=1).reshape(-1).to(bbox_pred.device)

        pred_overlaps = self.iou_calculator(bbox_pred, gt_bboxes)
        anchor_overlaps = self.iou_calculator(anchor, gt_bboxes)
        pred_max_overlaps, _ = pred_overlaps.max(dim=1)
        anchor_max_overlaps, _ = anchor_overlaps.max(dim=0)

        # 3. Compute the ignore indexes using gt_bboxes and predicted boxes
        ignore_idx = pred_max_overlaps > self.neg_ignore_thr
        assigned_gt_inds[ignore_idx] = -1

        # 4. Compute the ignore indexes of positive samples using anchors
        # and predicted boxes
        pos_gt_index = torch.arange(
            0, C1.size(1),
            device=bbox_pred.device).repeat(self.match_times * 2)
        pos_ious = anchor_overlaps[indexes, pos_gt_index]
        pos_ignore_idx = pos_ious < self.pos_ignore_thr

        pos_gt_index_with_ignore = pos_gt_index + 1
        pos_gt_index_with_ignore[pos_ignore_idx] = -1
        assigned_gt_inds[indexes] = pos_gt_index_with_ignore

        if gt_labels is not None:
            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
            pos_inds = torch.nonzero(
                assigned_gt_inds > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = gt_labels[
                    assigned_gt_inds[pos_inds] - 1]
        else:
            assigned_labels = None

        assign_result = AssignResult(
            num_gts,
            assigned_gt_inds,
            anchor_max_overlaps,
            labels=assigned_labels)
        assign_result.set_extra_property('pos_idx', ~pos_ignore_idx)
        assign_result.set_extra_property('pos_predicted_boxes',
                                         bbox_pred[indexes])
        assign_result.set_extra_property('target_boxes',
                                         gt_bboxes[pos_gt_index])
        return assign_result


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/builder.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import Registry, build_from_cfg

BBOX_ASSIGNERS = Registry('bbox_assigner')
BBOX_SAMPLERS = Registry('bbox_sampler')
BBOX_CODERS = Registry('bbox_coder')


def build_assigner(cfg, **default_args):
    """Builder of box assigner."""
    return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args)


def build_sampler(cfg, **default_args):
    """Builder of box sampler."""
    return build_from_cfg(cfg, BBOX_SAMPLERS, default_args)


def build_bbox_coder(cfg, **default_args):
    """Builder of box coder."""
    return build_from_cfg(cfg, BBOX_CODERS, default_args)


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/coder/__init__.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from .base_bbox_coder import BaseBBoxCoder
from .bucketing_bbox_coder import BucketingBBoxCoder
from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder
from .distance_point_bbox_coder import DistancePointBBoxCoder
from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder
from .pseudo_bbox_coder import PseudoBBoxCoder
from .tblr_bbox_coder import TBLRBBoxCoder
from .yolo_bbox_coder import YOLOBBoxCoder

__all__ = [
    'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder',
    'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder',
    'BucketingBBoxCoder', 'DistancePointBBoxCoder'
]
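An illustrative sketch (not part of the repository, and assuming mmdet is importable, e.g. the vendored copy above installed in the environment) of how the registries in builder.py are typically used from a config dict; the threshold values are made up:

from mmdet.core.bbox.builder import build_assigner

assigner = build_assigner(
    dict(type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3))
print(type(assigner).__name__)  # MaxIoUAssigner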
================================================
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/coder/base_bbox_coder.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod


class BaseBBoxCoder(metaclass=ABCMeta):
    """Base bounding box coder."""

    def __init__(self, **kwargs):
        pass

    @abstractmethod
    def encode(self, bboxes, gt_bboxes):
        """Encode deltas between bboxes and ground truth boxes."""

    @abstractmethod
    def decode(self, bboxes, bboxes_pred):
        """Decode the predicted bboxes according to prediction and base
        boxes."""


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/coder/bucketing_bbox_coder.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
import torch.nn.functional as F

from ..builder import BBOX_CODERS
from ..transforms import bbox_rescale
from .base_bbox_coder import BaseBBoxCoder


@BBOX_CODERS.register_module()
class BucketingBBoxCoder(BaseBBoxCoder):
    """Bucketing BBox Coder for Side-Aware Boundary Localization (SABL).

    Boundary Localization with Bucketing and Bucketing Guided Rescoring
    are implemented here.

    Please refer to https://arxiv.org/abs/1912.04260 for more details.

    Args:
        num_buckets (int): Number of buckets.
        scale_factor (int): Scale factor of proposals to generate buckets.
        offset_topk (int): Topk buckets are used to generate bucket fine
            regression targets. Defaults to 2.
        offset_upperbound (float): Offset upperbound to generate bucket fine
            regression targets. To avoid too large offset displacements.
            Defaults to 1.0.
        cls_ignore_neighbor (bool): Ignore second nearest bucket or Not.
            Defaults to True.
        clip_border (bool, optional): Whether clip the objects outside the
            border of the image. Defaults to True.
    """

    def __init__(self,
                 num_buckets,
                 scale_factor,
                 offset_topk=2,
                 offset_upperbound=1.0,
                 cls_ignore_neighbor=True,
                 clip_border=True):
        super(BucketingBBoxCoder, self).__init__()
        self.num_buckets = num_buckets
        self.scale_factor = scale_factor
        self.offset_topk = offset_topk
        self.offset_upperbound = offset_upperbound
        self.cls_ignore_neighbor = cls_ignore_neighbor
        self.clip_border = clip_border

    def encode(self, bboxes, gt_bboxes):
        """Get bucketing estimation and fine regression targets during
        training.

        Args:
            bboxes (torch.Tensor): source boxes, e.g., object proposals.
            gt_bboxes (torch.Tensor): target of the transformation, e.g.,
                ground truth boxes.

        Returns:
            encoded_bboxes (tuple[Tensor]): bucketing estimation and fine
                regression targets and weights
        """
        assert bboxes.size(0) == gt_bboxes.size(0)
        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
        encoded_bboxes = bbox2bucket(bboxes, gt_bboxes, self.num_buckets,
                                     self.scale_factor, self.offset_topk,
                                     self.offset_upperbound,
                                     self.cls_ignore_neighbor)
        return encoded_bboxes

    def decode(self, bboxes, pred_bboxes, max_shape=None):
        """Apply transformation `pred_bboxes` to `bboxes`.

        Args:
            bboxes (torch.Tensor): Basic boxes.
            pred_bboxes (torch.Tensor): Predictions for bucketing estimation
                and fine regression
            max_shape (tuple[int], optional): Maximum shape of boxes.
                Defaults to None.

        Returns:
            torch.Tensor: Decoded boxes.
        """
        assert len(pred_bboxes) == 2
        cls_preds, offset_preds = pred_bboxes
        assert cls_preds.size(0) == bboxes.size(0) and offset_preds.size(
            0) == bboxes.size(0)
        decoded_bboxes = bucket2bbox(bboxes, cls_preds, offset_preds,
                                     self.num_buckets, self.scale_factor,
                                     max_shape, self.clip_border)
        return decoded_bboxes
@mmcv.jit(coderize=True)
def generat_buckets(proposals, num_buckets, scale_factor=1.0):
    """Generate buckets w.r.t. bucket number and scale factor of proposals.

    Args:
        proposals (Tensor): Shape (n, 4)
        num_buckets (int): Number of buckets.
        scale_factor (float): Scale factor to rescale proposals.

    Returns:
        tuple[Tensor]: (bucket_w, bucket_h, l_buckets, r_buckets, t_buckets,
            d_buckets)

            - bucket_w: Width of buckets on x-axis. Shape (n, ).
            - bucket_h: Height of buckets on y-axis. Shape (n, ).
            - l_buckets: Left buckets. Shape (n, ceil(side_num/2)).
            - r_buckets: Right buckets. Shape (n, ceil(side_num/2)).
            - t_buckets: Top buckets. Shape (n, ceil(side_num/2)).
            - d_buckets: Down buckets. Shape (n, ceil(side_num/2)).
    """
    proposals = bbox_rescale(proposals, scale_factor)

    # number of buckets in each side
    side_num = int(np.ceil(num_buckets / 2.0))
    pw = proposals[..., 2] - proposals[..., 0]
    ph = proposals[..., 3] - proposals[..., 1]
    px1 = proposals[..., 0]
    py1 = proposals[..., 1]
    px2 = proposals[..., 2]
    py2 = proposals[..., 3]

    bucket_w = pw / num_buckets
    bucket_h = ph / num_buckets

    # left buckets
    l_buckets = px1[:, None] + (0.5 + torch.arange(
        0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
    # right buckets
    r_buckets = px2[:, None] - (0.5 + torch.arange(
        0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
    # top buckets
    t_buckets = py1[:, None] + (0.5 + torch.arange(
        0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
    # down buckets
    d_buckets = py2[:, None] - (0.5 + torch.arange(
        0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
    return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets


@mmcv.jit(coderize=True)
def bbox2bucket(proposals,
                gt,
                num_buckets,
                scale_factor,
                offset_topk=2,
                offset_upperbound=1.0,
                cls_ignore_neighbor=True):
    """Generate buckets estimation and fine regression targets.

    Args:
        proposals (Tensor): Shape (n, 4)
        gt (Tensor): Shape (n, 4)
        num_buckets (int): Number of buckets.
        scale_factor (float): Scale factor to rescale proposals.
        offset_topk (int): Topk buckets are used to generate bucket fine
            regression targets. Defaults to 2.
        offset_upperbound (float): Offset allowance to generate bucket fine
            regression targets. To avoid too large offset displacements.
            Defaults to 1.0.
        cls_ignore_neighbor (bool): Ignore second nearest bucket or Not.
            Defaults to True.

    Returns:
        tuple[Tensor]: (offsets, offsets_weights, bucket_labels, cls_weights).

            - offsets: Fine regression targets. Shape (n, num_buckets*2).
            - offsets_weights: Fine regression weights. Shape
              (n, num_buckets*2).
            - bucket_labels: Bucketing estimation labels. Shape
              (n, num_buckets*2).
            - cls_weights: Bucketing estimation weights. Shape
              (n, num_buckets*2).
""" assert proposals.size() == gt.size() # generate buckets proposals = proposals.float() gt = gt.float() (bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets) = generat_buckets(proposals, num_buckets, scale_factor) gx1 = gt[..., 0] gy1 = gt[..., 1] gx2 = gt[..., 2] gy2 = gt[..., 3] # generate offset targets and weights # offsets from buckets to gts l_offsets = (l_buckets - gx1[:, None]) / bucket_w[:, None] r_offsets = (r_buckets - gx2[:, None]) / bucket_w[:, None] t_offsets = (t_buckets - gy1[:, None]) / bucket_h[:, None] d_offsets = (d_buckets - gy2[:, None]) / bucket_h[:, None] # select top-k nearest buckets l_topk, l_label = l_offsets.abs().topk( offset_topk, dim=1, largest=False, sorted=True) r_topk, r_label = r_offsets.abs().topk( offset_topk, dim=1, largest=False, sorted=True) t_topk, t_label = t_offsets.abs().topk( offset_topk, dim=1, largest=False, sorted=True) d_topk, d_label = d_offsets.abs().topk( offset_topk, dim=1, largest=False, sorted=True) offset_l_weights = l_offsets.new_zeros(l_offsets.size()) offset_r_weights = r_offsets.new_zeros(r_offsets.size()) offset_t_weights = t_offsets.new_zeros(t_offsets.size()) offset_d_weights = d_offsets.new_zeros(d_offsets.size()) inds = torch.arange(0, proposals.size(0)).to(proposals).long() # generate offset weights of top-k nearest buckets for k in range(offset_topk): if k >= 1: offset_l_weights[inds, l_label[:, k]] = (l_topk[:, k] < offset_upperbound).float() offset_r_weights[inds, r_label[:, k]] = (r_topk[:, k] < offset_upperbound).float() offset_t_weights[inds, t_label[:, k]] = (t_topk[:, k] < offset_upperbound).float() offset_d_weights[inds, d_label[:, k]] = (d_topk[:, k] < offset_upperbound).float() else: offset_l_weights[inds, l_label[:, k]] = 1.0 offset_r_weights[inds, r_label[:, k]] = 1.0 offset_t_weights[inds, t_label[:, k]] = 1.0 offset_d_weights[inds, d_label[:, k]] = 1.0 offsets = torch.cat([l_offsets, r_offsets, t_offsets, d_offsets], dim=-1) offsets_weights = torch.cat([ offset_l_weights, offset_r_weights, offset_t_weights, offset_d_weights ], dim=-1) # generate bucket labels and weight side_num = int(np.ceil(num_buckets / 2.0)) labels = torch.stack( [l_label[:, 0], r_label[:, 0], t_label[:, 0], d_label[:, 0]], dim=-1) batch_size = labels.size(0) bucket_labels = F.one_hot(labels.view(-1), side_num).view(batch_size, -1).float() bucket_cls_l_weights = (l_offsets.abs() < 1).float() bucket_cls_r_weights = (r_offsets.abs() < 1).float() bucket_cls_t_weights = (t_offsets.abs() < 1).float() bucket_cls_d_weights = (d_offsets.abs() < 1).float() bucket_cls_weights = torch.cat([ bucket_cls_l_weights, bucket_cls_r_weights, bucket_cls_t_weights, bucket_cls_d_weights ], dim=-1) # ignore second nearest buckets for cls if necessary if cls_ignore_neighbor: bucket_cls_weights = (~((bucket_cls_weights == 1) & (bucket_labels == 0))).float() else: bucket_cls_weights[:] = 1.0 return offsets, offsets_weights, bucket_labels, bucket_cls_weights @mmcv.jit(coderize=True) def bucket2bbox(proposals, cls_preds, offset_preds, num_buckets, scale_factor=1.0, max_shape=None, clip_border=True): """Apply bucketing estimation (cls preds) and fine regression (offset preds) to generate det bboxes. Args: proposals (Tensor): Boxes to be transformed. Shape (n, 4) cls_preds (Tensor): bucketing estimation. Shape (n, num_buckets*2). offset_preds (Tensor): fine regression. Shape (n, num_buckets*2). num_buckets (int): Number of buckets. scale_factor (float): Scale factor to rescale proposals. max_shape (tuple[int, int]): Maximum bounds for boxes. 
        max_shape (tuple[int, int]): Maximum bounds for boxes, specifies
            (H, W).
        clip_border (bool, optional): Whether clip the objects outside the
            border of the image. Defaults to True.

    Returns:
        tuple[Tensor]: (bboxes, loc_confidence).

            - bboxes: predicted bboxes. Shape (n, 4)
            - loc_confidence: localization confidence of predicted bboxes.
              Shape (n,).
    """
    side_num = int(np.ceil(num_buckets / 2.0))
    cls_preds = cls_preds.view(-1, side_num)
    offset_preds = offset_preds.view(-1, side_num)

    scores = F.softmax(cls_preds, dim=1)
    score_topk, score_label = scores.topk(2, dim=1, largest=True, sorted=True)

    rescaled_proposals = bbox_rescale(proposals, scale_factor)

    pw = rescaled_proposals[..., 2] - rescaled_proposals[..., 0]
    ph = rescaled_proposals[..., 3] - rescaled_proposals[..., 1]
    px1 = rescaled_proposals[..., 0]
    py1 = rescaled_proposals[..., 1]
    px2 = rescaled_proposals[..., 2]
    py2 = rescaled_proposals[..., 3]

    bucket_w = pw / num_buckets
    bucket_h = ph / num_buckets

    score_inds_l = score_label[0::4, 0]
    score_inds_r = score_label[1::4, 0]
    score_inds_t = score_label[2::4, 0]
    score_inds_d = score_label[3::4, 0]
    l_buckets = px1 + (0.5 + score_inds_l.float()) * bucket_w
    r_buckets = px2 - (0.5 + score_inds_r.float()) * bucket_w
    t_buckets = py1 + (0.5 + score_inds_t.float()) * bucket_h
    d_buckets = py2 - (0.5 + score_inds_d.float()) * bucket_h

    offsets = offset_preds.view(-1, 4, side_num)
    inds = torch.arange(proposals.size(0)).to(proposals).long()
    l_offsets = offsets[:, 0, :][inds, score_inds_l]
    r_offsets = offsets[:, 1, :][inds, score_inds_r]
    t_offsets = offsets[:, 2, :][inds, score_inds_t]
    d_offsets = offsets[:, 3, :][inds, score_inds_d]

    x1 = l_buckets - l_offsets * bucket_w
    x2 = r_buckets - r_offsets * bucket_w
    y1 = t_buckets - t_offsets * bucket_h
    y2 = d_buckets - d_offsets * bucket_h

    if clip_border and max_shape is not None:
        x1 = x1.clamp(min=0, max=max_shape[1] - 1)
        y1 = y1.clamp(min=0, max=max_shape[0] - 1)
        x2 = x2.clamp(min=0, max=max_shape[1] - 1)
        y2 = y2.clamp(min=0, max=max_shape[0] - 1)
    bboxes = torch.cat([x1[:, None], y1[:, None], x2[:, None], y2[:, None]],
                       dim=-1)

    # bucketing guided rescoring
    loc_confidence = score_topk[:, 0]
    top2_neighbor_inds = (score_label[:, 0] - score_label[:, 1]).abs() == 1
    loc_confidence += score_topk[:, 1] * top2_neighbor_inds.float()
    loc_confidence = loc_confidence.view(-1, 4).mean(dim=1)

    return bboxes, loc_confidence
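An illustrative sketch (not part of the repository; the bucket count and proposal width are made up) of the bucket layout used above: each box side gets side_num = ceil(num_buckets / 2) buckets counted inward from that side, of width pw / num_buckets along x:

import numpy as np

num_buckets = 14
side_num = int(np.ceil(num_buckets / 2.0))
pw = 70.0                                  # made-up proposal width
bucket_w = pw / num_buckets                # 5.0 pixels per bucket
l_centers = 0.0 + (0.5 + np.arange(side_num)) * bucket_w  # from left edge x1=0
print(side_num, bucket_w, l_centers)       # 7 5.0 [ 2.5  7.5 ... 32.5]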
""" def __init__(self, target_means=(0., 0., 0., 0.), target_stds=(1., 1., 1., 1.), clip_border=True, add_ctr_clamp=False, ctr_clamp=32): super(BaseBBoxCoder, self).__init__() self.means = target_means self.stds = target_stds self.clip_border = clip_border self.add_ctr_clamp = add_ctr_clamp self.ctr_clamp = ctr_clamp def encode(self, bboxes, gt_bboxes): """Get box regression transformation deltas that can be used to transform the ``bboxes`` into the ``gt_bboxes``. Args: bboxes (torch.Tensor): Source boxes, e.g., object proposals. gt_bboxes (torch.Tensor): Target of the transformation, e.g., ground-truth boxes. Returns: torch.Tensor: Box transformation deltas """ assert bboxes.size(0) == gt_bboxes.size(0) assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 encoded_bboxes = bbox2delta(bboxes, gt_bboxes, self.means, self.stds) return encoded_bboxes def decode(self, bboxes, pred_bboxes, max_shape=None, wh_ratio_clip=16 / 1000): """Apply transformation `pred_bboxes` to `boxes`. Args: bboxes (torch.Tensor): Basic boxes. Shape (B, N, 4) or (N, 4) pred_bboxes (Tensor): Encoded offsets with respect to each roi. Has shape (B, N, num_classes * 4) or (B, N, 4) or (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H when rois is a grid of anchors.Offset encoding follows [1]_. max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. wh_ratio_clip (float, optional): The allowed ratio between width and height. Returns: torch.Tensor: Decoded boxes. """ assert pred_bboxes.size(0) == bboxes.size(0) if pred_bboxes.ndim == 3: assert pred_bboxes.size(1) == bboxes.size(1) if pred_bboxes.ndim == 2 and not torch.onnx.is_in_onnx_export(): # single image decode decoded_bboxes = delta2bbox(bboxes, pred_bboxes, self.means, self.stds, max_shape, wh_ratio_clip, self.clip_border, self.add_ctr_clamp, self.ctr_clamp) else: if pred_bboxes.ndim == 3 and not torch.onnx.is_in_onnx_export(): warnings.warn( 'DeprecationWarning: onnx_delta2bbox is deprecated ' 'in the case of batch decoding and non-ONNX, ' 'please use “delta2bbox” instead. In order to improve ' 'the decoding speed, the batch function will no ' 'longer be supported. ') decoded_bboxes = onnx_delta2bbox(bboxes, pred_bboxes, self.means, self.stds, max_shape, wh_ratio_clip, self.clip_border, self.add_ctr_clamp, self.ctr_clamp) return decoded_bboxes @mmcv.jit(coderize=True) def bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)): """Compute deltas of proposals w.r.t. gt. We usually compute the deltas of x, y, w, h of proposals w.r.t ground truth bboxes to get regression target. This is the inverse function of :func:`delta2bbox`. Args: proposals (Tensor): Boxes to be transformed, shape (N, ..., 4) gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4) means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates Returns: Tensor: deltas with shape (N, 4), where columns represent dx, dy, dw, dh. 
""" assert proposals.size() == gt.size() proposals = proposals.float() gt = gt.float() px = (proposals[..., 0] + proposals[..., 2]) * 0.5 py = (proposals[..., 1] + proposals[..., 3]) * 0.5 pw = proposals[..., 2] - proposals[..., 0] ph = proposals[..., 3] - proposals[..., 1] gx = (gt[..., 0] + gt[..., 2]) * 0.5 gy = (gt[..., 1] + gt[..., 3]) * 0.5 gw = gt[..., 2] - gt[..., 0] gh = gt[..., 3] - gt[..., 1] dx = (gx - px) / pw dy = (gy - py) / ph dw = torch.log(gw / pw) dh = torch.log(gh / ph) deltas = torch.stack([dx, dy, dw, dh], dim=-1) means = deltas.new_tensor(means).unsqueeze(0) stds = deltas.new_tensor(stds).unsqueeze(0) deltas = deltas.sub_(means).div_(stds) return deltas @mmcv.jit(coderize=True) def delta2bbox(rois, deltas, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.), max_shape=None, wh_ratio_clip=16 / 1000, clip_border=True, add_ctr_clamp=False, ctr_clamp=32): """Apply deltas to shift/scale base boxes. Typically the rois are anchor or proposed bounding boxes and the deltas are network outputs used to shift/scale those boxes. This is the inverse function of :func:`bbox2delta`. Args: rois (Tensor): Boxes to be transformed. Has shape (N, 4). deltas (Tensor): Encoded offsets relative to each roi. Has shape (N, num_classes * 4) or (N, 4). Note N = num_base_anchors * W * H, when rois is a grid of anchors. Offset encoding follows [1]_. means (Sequence[float]): Denormalizing means for delta coordinates. Default (0., 0., 0., 0.). stds (Sequence[float]): Denormalizing standard deviation for delta coordinates. Default (1., 1., 1., 1.). max_shape (tuple[int, int]): Maximum bounds for boxes, specifies (H, W). Default None. wh_ratio_clip (float): Maximum aspect ratio for boxes. Default 16 / 1000. clip_border (bool, optional): Whether clip the objects outside the border of the image. Default True. add_ctr_clamp (bool): Whether to add center clamp. When set to True, the center of the prediction bounding box will be clamped to avoid being too far away from the center of the anchor. Only used by YOLOF. Default False. ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. Default 32. Returns: Tensor: Boxes with shape (N, num_classes * 4) or (N, 4), where 4 represent tl_x, tl_y, br_x, br_y. References: .. 
[1] https://arxiv.org/abs/1311.2524 Example: >>> rois = torch.Tensor([[ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 5., 5., 5., 5.]]) >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], >>> [ 1., 1., 1., 1.], >>> [ 0., 0., 2., -1.], >>> [ 0.7, -1.9, -0.5, 0.3]]) >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3)) tensor([[0.0000, 0.0000, 1.0000, 1.0000], [0.1409, 0.1409, 2.8591, 2.8591], [0.0000, 0.3161, 4.1945, 0.6839], [5.0000, 5.0000, 5.0000, 5.0000]]) """ num_bboxes, num_classes = deltas.size(0), deltas.size(1) // 4 if num_bboxes == 0: return deltas deltas = deltas.reshape(-1, 4) means = deltas.new_tensor(means).view(1, -1) stds = deltas.new_tensor(stds).view(1, -1) denorm_deltas = deltas * stds + means dxy = denorm_deltas[:, :2] dwh = denorm_deltas[:, 2:] # Compute width/height of each roi rois_ = rois.repeat(1, num_classes).reshape(-1, 4) pxy = ((rois_[:, :2] + rois_[:, 2:]) * 0.5) pwh = (rois_[:, 2:] - rois_[:, :2]) dxy_wh = pwh * dxy max_ratio = np.abs(np.log(wh_ratio_clip)) if add_ctr_clamp: dxy_wh = torch.clamp(dxy_wh, max=ctr_clamp, min=-ctr_clamp) dwh = torch.clamp(dwh, max=max_ratio) else: dwh = dwh.clamp(min=-max_ratio, max=max_ratio) gxy = pxy + dxy_wh gwh = pwh * dwh.exp() x1y1 = gxy - (gwh * 0.5) x2y2 = gxy + (gwh * 0.5) bboxes = torch.cat([x1y1, x2y2], dim=-1) if clip_border and max_shape is not None: bboxes[..., 0::2].clamp_(min=0, max=max_shape[1]) bboxes[..., 1::2].clamp_(min=0, max=max_shape[0]) bboxes = bboxes.reshape(num_bboxes, -1) return bboxes def onnx_delta2bbox(rois, deltas, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.), max_shape=None, wh_ratio_clip=16 / 1000, clip_border=True, add_ctr_clamp=False, ctr_clamp=32): """Apply deltas to shift/scale base boxes. Typically the rois are anchor or proposed bounding boxes and the deltas are network outputs used to shift/scale those boxes. This is the inverse function of :func:`bbox2delta`. Args: rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4) deltas (Tensor): Encoded offsets with respect to each roi. Has shape (B, N, num_classes * 4) or (B, N, 4) or (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H when rois is a grid of anchors.Offset encoding follows [1]_. means (Sequence[float]): Denormalizing means for delta coordinates. Default (0., 0., 0., 0.). stds (Sequence[float]): Denormalizing standard deviation for delta coordinates. Default (1., 1., 1., 1.). max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If rois shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. Default None. wh_ratio_clip (float): Maximum aspect ratio for boxes. Default 16 / 1000. clip_border (bool, optional): Whether clip the objects outside the border of the image. Default True. add_ctr_clamp (bool): Whether to add center clamp, when added, the predicted box is clamped is its center is too far away from the original anchor's center. Only used by YOLOF. Default False. ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. Default 32. Returns: Tensor: Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or (N, num_classes * 4) or (N, 4), where 4 represent tl_x, tl_y, br_x, br_y. References: .. 
[1] https://arxiv.org/abs/1311.2524 Example: >>> rois = torch.Tensor([[ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 5., 5., 5., 5.]]) >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], >>> [ 1., 1., 1., 1.], >>> [ 0., 0., 2., -1.], >>> [ 0.7, -1.9, -0.5, 0.3]]) >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3)) tensor([[0.0000, 0.0000, 1.0000, 1.0000], [0.1409, 0.1409, 2.8591, 2.8591], [0.0000, 0.3161, 4.1945, 0.6839], [5.0000, 5.0000, 5.0000, 5.0000]]) """ means = deltas.new_tensor(means).view(1, -1).repeat(1, deltas.size(-1) // 4) stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4) denorm_deltas = deltas * stds + means dx = denorm_deltas[..., 0::4] dy = denorm_deltas[..., 1::4] dw = denorm_deltas[..., 2::4] dh = denorm_deltas[..., 3::4] x1, y1 = rois[..., 0], rois[..., 1] x2, y2 = rois[..., 2], rois[..., 3] # Compute center of each roi px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx) py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy) # Compute width/height of each roi pw = (x2 - x1).unsqueeze(-1).expand_as(dw) ph = (y2 - y1).unsqueeze(-1).expand_as(dh) dx_width = pw * dx dy_height = ph * dy max_ratio = np.abs(np.log(wh_ratio_clip)) if add_ctr_clamp: dx_width = torch.clamp(dx_width, max=ctr_clamp, min=-ctr_clamp) dy_height = torch.clamp(dy_height, max=ctr_clamp, min=-ctr_clamp) dw = torch.clamp(dw, max=max_ratio) dh = torch.clamp(dh, max=max_ratio) else: dw = dw.clamp(min=-max_ratio, max=max_ratio) dh = dh.clamp(min=-max_ratio, max=max_ratio) # Use exp(network energy) to enlarge/shrink each roi gw = pw * dw.exp() gh = ph * dh.exp() # Use network energy to shift the center of each roi gx = px + dx_width gy = py + dy_height # Convert center-xy/width/height to top-left, bottom-right x1 = gx - gw * 0.5 y1 = gy - gh * 0.5 x2 = gx + gw * 0.5 y2 = gy + gh * 0.5 bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size()) if clip_border and max_shape is not None: # clip bboxes with dynamic `min` and `max` for onnx if torch.onnx.is_in_onnx_export(): from mmdet.core.export import dynamic_clip_for_onnx x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape) bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size()) return bboxes if not isinstance(max_shape, torch.Tensor): max_shape = x1.new_tensor(max_shape) max_shape = max_shape[..., :2].type_as(x1) if max_shape.ndim == 2: assert bboxes.ndim == 3 assert max_shape.size(0) == bboxes.size(0) min_xy = x1.new_tensor(0) max_xy = torch.cat( [max_shape] * (deltas.size(-1) // 2), dim=-1).flip(-1).unsqueeze(-2) bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) return bboxes ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/coder/distance_point_bbox_coder.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import BBOX_CODERS from ..transforms import bbox2distance, distance2bbox from .base_bbox_coder import BaseBBoxCoder @BBOX_CODERS.register_module() class DistancePointBBoxCoder(BaseBBoxCoder): """Distance Point BBox coder. This coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left, right) and decode it back to the original. Args: clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. 
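A minimal sketch of the point-to-sides representation this coder builds on (the actual work is delegated to bbox2distance and distance2bbox from mmdet.core.bbox.transforms, implemented elsewhere): a box is stored as the distances from a reference point to its four boundaries, and decoding simply adds them back. The tensors below are illustrative values, not library fixtures.

import torch

point = torch.tensor([5.0, 5.0])
gt = torch.tensor([2.0, 1.0, 9.0, 8.0])   # x1, y1, x2, y2
# Distances to the left, top, right, bottom boundaries: (3, 4, 4, 3).
dist = torch.stack([point[0] - gt[0], point[1] - gt[1],
                    gt[2] - point[0], gt[3] - point[1]])
decoded = torch.stack([point[0] - dist[0], point[1] - dist[1],
                       point[0] + dist[2], point[1] + dist[3]])
assert torch.equal(decoded, gt)  # encode/decode round-trips exactly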
""" def __init__(self, clip_border=True): super(BaseBBoxCoder, self).__init__() self.clip_border = clip_border def encode(self, points, gt_bboxes, max_dis=None, eps=0.1): """Encode bounding box to distances. Args: points (Tensor): Shape (N, 2), The format is [x, y]. gt_bboxes (Tensor): Shape (N, 4), The format is "xyxy" max_dis (float): Upper bound of the distance. Default None. eps (float): a small value to ensure target < max_dis, instead <=. Default 0.1. Returns: Tensor: Box transformation deltas. The shape is (N, 4). """ assert points.size(0) == gt_bboxes.size(0) assert points.size(-1) == 2 assert gt_bboxes.size(-1) == 4 return bbox2distance(points, gt_bboxes, max_dis, eps) def decode(self, points, pred_bboxes, max_shape=None): """Decode distance prediction to bounding box. Args: points (Tensor): Shape (B, N, 2) or (N, 2). pred_bboxes (Tensor): Distance from the given point to 4 boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]], and the length of max_shape should also be B. Default None. Returns: Tensor: Boxes with shape (N, 4) or (B, N, 4) """ assert points.size(0) == pred_bboxes.size(0) assert points.size(-1) == 2 assert pred_bboxes.size(-1) == 4 if self.clip_border is False: max_shape = None return distance2bbox(points, pred_bboxes, max_shape) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import mmcv import numpy as np import torch from ..builder import BBOX_CODERS from .base_bbox_coder import BaseBBoxCoder @BBOX_CODERS.register_module() class LegacyDeltaXYWHBBoxCoder(BaseBBoxCoder): """Legacy Delta XYWH BBox coder used in MMDet V1.x. Following the practice in R-CNN [1]_, this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2). Note: The main difference between :class`LegacyDeltaXYWHBBoxCoder` and :class:`DeltaXYWHBBoxCoder` is whether ``+ 1`` is used during width and height calculation. We suggest to only use this coder when testing with MMDet V1.x models. References: .. [1] https://arxiv.org/abs/1311.2524 Args: target_means (Sequence[float]): denormalizing means of target for delta coordinates target_stds (Sequence[float]): denormalizing standard deviation of target for delta coordinates """ def __init__(self, target_means=(0., 0., 0., 0.), target_stds=(1., 1., 1., 1.)): super(BaseBBoxCoder, self).__init__() self.means = target_means self.stds = target_stds def encode(self, bboxes, gt_bboxes): """Get box regression transformation deltas that can be used to transform the ``bboxes`` into the ``gt_bboxes``. Args: bboxes (torch.Tensor): source boxes, e.g., object proposals. gt_bboxes (torch.Tensor): target of the transformation, e.g., ground-truth boxes. Returns: torch.Tensor: Box transformation deltas """ assert bboxes.size(0) == gt_bboxes.size(0) assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 encoded_bboxes = legacy_bbox2delta(bboxes, gt_bboxes, self.means, self.stds) return encoded_bboxes def decode(self, bboxes, pred_bboxes, max_shape=None, wh_ratio_clip=16 / 1000): """Apply transformation `pred_bboxes` to `boxes`. Args: boxes (torch.Tensor): Basic boxes. 
pred_bboxes (torch.Tensor): Encoded boxes with shape max_shape (tuple[int], optional): Maximum shape of boxes. Defaults to None. wh_ratio_clip (float, optional): The allowed ratio between width and height. Returns: torch.Tensor: Decoded boxes. """ assert pred_bboxes.size(0) == bboxes.size(0) decoded_bboxes = legacy_delta2bbox(bboxes, pred_bboxes, self.means, self.stds, max_shape, wh_ratio_clip) return decoded_bboxes @mmcv.jit(coderize=True) def legacy_bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)): """Compute deltas of proposals w.r.t. gt in the MMDet V1.x manner. We usually compute the deltas of x, y, w, h of proposals w.r.t ground truth bboxes to get regression target. This is the inverse function of `delta2bbox()` Args: proposals (Tensor): Boxes to be transformed, shape (N, ..., 4) gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4) means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates Returns: Tensor: deltas with shape (N, 4), where columns represent dx, dy, dw, dh. """ assert proposals.size() == gt.size() proposals = proposals.float() gt = gt.float() px = (proposals[..., 0] + proposals[..., 2]) * 0.5 py = (proposals[..., 1] + proposals[..., 3]) * 0.5 pw = proposals[..., 2] - proposals[..., 0] + 1.0 ph = proposals[..., 3] - proposals[..., 1] + 1.0 gx = (gt[..., 0] + gt[..., 2]) * 0.5 gy = (gt[..., 1] + gt[..., 3]) * 0.5 gw = gt[..., 2] - gt[..., 0] + 1.0 gh = gt[..., 3] - gt[..., 1] + 1.0 dx = (gx - px) / pw dy = (gy - py) / ph dw = torch.log(gw / pw) dh = torch.log(gh / ph) deltas = torch.stack([dx, dy, dw, dh], dim=-1) means = deltas.new_tensor(means).unsqueeze(0) stds = deltas.new_tensor(stds).unsqueeze(0) deltas = deltas.sub_(means).div_(stds) return deltas @mmcv.jit(coderize=True) def legacy_delta2bbox(rois, deltas, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.), max_shape=None, wh_ratio_clip=16 / 1000): """Apply deltas to shift/scale base boxes in the MMDet V1.x manner. Typically the rois are anchor or proposed bounding boxes and the deltas are network outputs used to shift/scale those boxes. This is the inverse function of `bbox2delta()` Args: rois (Tensor): Boxes to be transformed. Has shape (N, 4) deltas (Tensor): Encoded offsets with respect to each roi. Has shape (N, 4 * num_classes). Note N = num_anchors * W * H when rois is a grid of anchors. Offset encoding follows [1]_. means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W) wh_ratio_clip (float): Maximum aspect ratio for boxes. Returns: Tensor: Boxes with shape (N, 4), where columns represent tl_x, tl_y, br_x, br_y. References: .. 
[1] https://arxiv.org/abs/1311.2524 Example: >>> rois = torch.Tensor([[ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 5., 5., 5., 5.]]) >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], >>> [ 1., 1., 1., 1.], >>> [ 0., 0., 2., -1.], >>> [ 0.7, -1.9, -0.5, 0.3]]) >>> legacy_delta2bbox(rois, deltas, max_shape=(32, 32)) tensor([[0.0000, 0.0000, 1.5000, 1.5000], [0.0000, 0.0000, 5.2183, 5.2183], [0.0000, 0.1321, 7.8891, 0.8679], [5.3967, 2.4251, 6.0033, 3.7749]]) """ means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4) stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4) denorm_deltas = deltas * stds + means dx = denorm_deltas[:, 0::4] dy = denorm_deltas[:, 1::4] dw = denorm_deltas[:, 2::4] dh = denorm_deltas[:, 3::4] max_ratio = np.abs(np.log(wh_ratio_clip)) dw = dw.clamp(min=-max_ratio, max=max_ratio) dh = dh.clamp(min=-max_ratio, max=max_ratio) # Compute center of each roi px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx) py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy) # Compute width/height of each roi pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw) ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh) # Use exp(network energy) to enlarge/shrink each roi gw = pw * dw.exp() gh = ph * dh.exp() # Use network energy to shift the center of each roi gx = px + pw * dx gy = py + ph * dy # Convert center-xy/width/height to top-left, bottom-right # The true legacy box coder should +- 0.5 here. # However, current implementation improves the performance when testing # the models trained in MMDetection 1.X (~0.5 bbox AP, 0.2 mask AP) x1 = gx - gw * 0.5 y1 = gy - gh * 0.5 x2 = gx + gw * 0.5 y2 = gy + gh * 0.5 if max_shape is not None: x1 = x1.clamp(min=0, max=max_shape[1] - 1) y1 = y1.clamp(min=0, max=max_shape[0] - 1) x2 = x2.clamp(min=0, max=max_shape[1] - 1) y2 = y2.clamp(min=0, max=max_shape[0] - 1) bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas) return bboxes ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/coder/pseudo_bbox_coder.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import BBOX_CODERS from .base_bbox_coder import BaseBBoxCoder @BBOX_CODERS.register_module() class PseudoBBoxCoder(BaseBBoxCoder): """Pseudo bounding box coder.""" def __init__(self, **kwargs): super(BaseBBoxCoder, self).__init__(**kwargs) def encode(self, bboxes, gt_bboxes): """torch.Tensor: return the given ``bboxes``""" return gt_bboxes def decode(self, bboxes, pred_bboxes): """torch.Tensor: return the given ``pred_bboxes``""" return pred_bboxes ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/coder/tblr_bbox_coder.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from ..builder import BBOX_CODERS from .base_bbox_coder import BaseBBoxCoder @BBOX_CODERS.register_module() class TBLRBBoxCoder(BaseBBoxCoder): """TBLR BBox coder. Following the practice in `FSAF `_, this coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left, right) and decode it back to the original. Args: normalizer (list | float): Normalization factor to be divided with when coding the coordinates. If it is a list, it should have length of 4 indicating normalization factor in tblr dims. Otherwise it is a unified float factor for all dims. 
Default: 4.0 clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. """ def __init__(self, normalizer=4.0, clip_border=True): super(BaseBBoxCoder, self).__init__() self.normalizer = normalizer self.clip_border = clip_border def encode(self, bboxes, gt_bboxes): """Get box regression transformation deltas that can be used to transform the ``bboxes`` into the ``gt_bboxes`` in the (top, left, bottom, right) order. Args: bboxes (torch.Tensor): source boxes, e.g., object proposals. gt_bboxes (torch.Tensor): target of the transformation, e.g., ground truth boxes. Returns: torch.Tensor: Box transformation deltas """ assert bboxes.size(0) == gt_bboxes.size(0) assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 encoded_bboxes = bboxes2tblr( bboxes, gt_bboxes, normalizer=self.normalizer) return encoded_bboxes def decode(self, bboxes, pred_bboxes, max_shape=None): """Apply transformation `pred_bboxes` to `boxes`. Args: bboxes (torch.Tensor): Basic boxes.Shape (B, N, 4) or (N, 4) pred_bboxes (torch.Tensor): Encoded boxes with shape (B, N, 4) or (N, 4) max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. Returns: torch.Tensor: Decoded boxes. """ decoded_bboxes = tblr2bboxes( bboxes, pred_bboxes, normalizer=self.normalizer, max_shape=max_shape, clip_border=self.clip_border) return decoded_bboxes @mmcv.jit(coderize=True) def bboxes2tblr(priors, gts, normalizer=4.0, normalize_by_wh=True): """Encode ground truth boxes to tblr coordinate. It first convert the gt coordinate to tblr format, (top, bottom, left, right), relative to prior box centers. The tblr coordinate may be normalized by the side length of prior bboxes if `normalize_by_wh` is specified as True, and it is then normalized by the `normalizer` factor. Args: priors (Tensor): Prior boxes in point form Shape: (num_proposals,4). gts (Tensor): Coords of ground truth for each prior in point-form Shape: (num_proposals, 4). normalizer (Sequence[float] | float): normalization parameter of encoded boxes. If it is a list, it has to have length = 4. Default: 4.0 normalize_by_wh (bool): Whether to normalize tblr coordinate by the side length (wh) of prior bboxes. Return: encoded boxes (Tensor), Shape: (num_proposals, 4) """ # dist b/t match center and prior's center if not isinstance(normalizer, float): normalizer = torch.tensor(normalizer, device=priors.device) assert len(normalizer) == 4, 'Normalizer must have length = 4' assert priors.size(0) == gts.size(0) prior_centers = (priors[:, 0:2] + priors[:, 2:4]) / 2 xmin, ymin, xmax, ymax = gts.split(1, dim=1) top = prior_centers[:, 1].unsqueeze(1) - ymin bottom = ymax - prior_centers[:, 1].unsqueeze(1) left = prior_centers[:, 0].unsqueeze(1) - xmin right = xmax - prior_centers[:, 0].unsqueeze(1) loc = torch.cat((top, bottom, left, right), dim=1) if normalize_by_wh: # Normalize tblr by anchor width and height wh = priors[:, 2:4] - priors[:, 0:2] w, h = torch.split(wh, 1, dim=1) loc[:, :2] /= h # tb is normalized by h loc[:, 2:] /= w # lr is normalized by w # Normalize tblr by the given normalization factor return loc / normalizer @mmcv.jit(coderize=True) def tblr2bboxes(priors, tblr, normalizer=4.0, normalize_by_wh=True, max_shape=None, clip_border=True): """Decode tblr outputs to prediction boxes. 
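As a worked example of the bboxes2tblr encoding above, with normalize_by_wh=True and the default normalizer of 4.0: the gt sides are measured from the prior's center, top/bottom are divided by the prior height, left/right by the prior width, and everything is divided by the normalizer.

import torch

prior = torch.tensor([0.0, 0.0, 8.0, 4.0])   # center (4, 2), w=8, h=4
gt = torch.tensor([2.0, 0.0, 10.0, 4.0])
cx, cy = (prior[0] + prior[2]) / 2, (prior[1] + prior[3]) / 2
w, h = prior[2] - prior[0], prior[3] - prior[1]
top, bottom = cy - gt[1], gt[3] - cy          # 2, 2
left, right = cx - gt[0], gt[2] - cx          # 2, 6
loc = torch.stack([top / h, bottom / h, left / w, right / w]) / 4.0
print(loc)  # tensor([0.1250, 0.1250, 0.0625, 0.1875])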
The process includes 3 steps: 1) De-normalize tblr coordinates by multiplying it with `normalizer`; 2) De-normalize tblr coordinates by the prior bbox width and height if `normalize_by_wh` is `True`; 3) Convert tblr (top, bottom, left, right) pair relative to the center of priors back to (xmin, ymin, xmax, ymax) coordinate. Args: priors (Tensor): Prior boxes in point form (x0, y0, x1, y1) Shape: (N,4) or (B, N, 4). tblr (Tensor): Coords of network output in tblr form Shape: (N, 4) or (B, N, 4). normalizer (Sequence[float] | float): Normalization parameter of encoded boxes. By list, it represents the normalization factors at tblr dims. By float, it is the unified normalization factor at all dims. Default: 4.0 normalize_by_wh (bool): Whether the tblr coordinates have been normalized by the side length (wh) of prior bboxes. max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. Return: encoded boxes (Tensor): Boxes with shape (N, 4) or (B, N, 4) """ if not isinstance(normalizer, float): normalizer = torch.tensor(normalizer, device=priors.device) assert len(normalizer) == 4, 'Normalizer must have length = 4' assert priors.size(0) == tblr.size(0) if priors.ndim == 3: assert priors.size(1) == tblr.size(1) loc_decode = tblr * normalizer prior_centers = (priors[..., 0:2] + priors[..., 2:4]) / 2 if normalize_by_wh: wh = priors[..., 2:4] - priors[..., 0:2] w, h = torch.split(wh, 1, dim=-1) # Inplace operation with slice would failed for exporting to ONNX th = h * loc_decode[..., :2] # tb tw = w * loc_decode[..., 2:] # lr loc_decode = torch.cat([th, tw], dim=-1) # Cannot be exported using onnx when loc_decode.split(1, dim=-1) top, bottom, left, right = loc_decode.split((1, 1, 1, 1), dim=-1) xmin = prior_centers[..., 0].unsqueeze(-1) - left xmax = prior_centers[..., 0].unsqueeze(-1) + right ymin = prior_centers[..., 1].unsqueeze(-1) - top ymax = prior_centers[..., 1].unsqueeze(-1) + bottom bboxes = torch.cat((xmin, ymin, xmax, ymax), dim=-1) if clip_border and max_shape is not None: # clip bboxes with dynamic `min` and `max` for onnx if torch.onnx.is_in_onnx_export(): from mmdet.core.export import dynamic_clip_for_onnx xmin, ymin, xmax, ymax = dynamic_clip_for_onnx( xmin, ymin, xmax, ymax, max_shape) bboxes = torch.cat([xmin, ymin, xmax, ymax], dim=-1) return bboxes if not isinstance(max_shape, torch.Tensor): max_shape = priors.new_tensor(max_shape) max_shape = max_shape[..., :2].type_as(priors) if max_shape.ndim == 2: assert bboxes.ndim == 3 assert max_shape.size(0) == bboxes.size(0) min_xy = priors.new_tensor(0) max_xy = torch.cat([max_shape, max_shape], dim=-1).flip(-1).unsqueeze(-2) bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) return bboxes ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/coder/yolo_bbox_coder.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from ..builder import BBOX_CODERS from .base_bbox_coder import BaseBBoxCoder @BBOX_CODERS.register_module() class YOLOBBoxCoder(BaseBBoxCoder): """YOLO BBox coder. 
Following `YOLO `_, this coder divide image into grids, and encode bbox (x1, y1, x2, y2) into (cx, cy, dw, dh). cx, cy in [0., 1.], denotes relative center position w.r.t the center of bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`. Args: eps (float): Min value of cx, cy when encoding. """ def __init__(self, eps=1e-6): super(BaseBBoxCoder, self).__init__() self.eps = eps @mmcv.jit(coderize=True) def encode(self, bboxes, gt_bboxes, stride): """Get box regression transformation deltas that can be used to transform the ``bboxes`` into the ``gt_bboxes``. Args: bboxes (torch.Tensor): Source boxes, e.g., anchors. gt_bboxes (torch.Tensor): Target of the transformation, e.g., ground-truth boxes. stride (torch.Tensor | int): Stride of bboxes. Returns: torch.Tensor: Box transformation deltas """ assert bboxes.size(0) == gt_bboxes.size(0) assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5 y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5 w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0] h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1] x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5 y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5 w = bboxes[..., 2] - bboxes[..., 0] h = bboxes[..., 3] - bboxes[..., 1] w_target = torch.log((w_gt / w).clamp(min=self.eps)) h_target = torch.log((h_gt / h).clamp(min=self.eps)) x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp( self.eps, 1 - self.eps) y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp( self.eps, 1 - self.eps) encoded_bboxes = torch.stack( [x_center_target, y_center_target, w_target, h_target], dim=-1) return encoded_bboxes @mmcv.jit(coderize=True) def decode(self, bboxes, pred_bboxes, stride): """Apply transformation `pred_bboxes` to `boxes`. Args: boxes (torch.Tensor): Basic boxes, e.g. anchors. pred_bboxes (torch.Tensor): Encoded boxes with shape stride (torch.Tensor | int): Strides of bboxes. Returns: torch.Tensor: Decoded boxes. """ assert pred_bboxes.size(-1) == bboxes.size(-1) == 4 xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + ( pred_bboxes[..., :2] - 0.5) * stride whs = (bboxes[..., 2:] - bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp() decoded_bboxes = torch.stack( (xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] - whs[..., 1], xy_centers[..., 0] + whs[..., 0], xy_centers[..., 1] + whs[..., 1]), dim=-1) return decoded_bboxes ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/demodata.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from mmdet.utils.util_random import ensure_rng def random_boxes(num=1, scale=1, rng=None): """Simple version of ``kwimage.Boxes.random`` Returns: Tensor: shape (n, 4) in x1, y1, x2, y2 format. 
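Returning to the YOLO coder above, a small numeric check of its encode step: the gt center is expressed as a fraction of the grid cell (stride) shifted into [0, 1], and the size as a log ratio against the anchor, analogous to DeltaXYWHBBoxCoder.

import math

stride = 32
anchor = (0, 0, 32, 32)        # center (16, 16), w = h = 32
gt = (8, 8, 40, 48)            # center (24, 28), w = 32, h = 40
x_t = (24 - 16) / stride + 0.5   # 0.75, inside [0, 1]
y_t = (28 - 16) / stride + 0.5   # 0.875
w_t = math.log(32 / 32)          # 0.0
h_t = math.log(40 / 32)          # ~0.223
print(x_t, y_t, w_t, h_t)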
References: https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390 Example: >>> num = 3 >>> scale = 512 >>> rng = 0 >>> boxes = random_boxes(num, scale, rng) >>> print(boxes) tensor([[280.9925, 278.9802, 308.6148, 366.1769], [216.9113, 330.6978, 224.0446, 456.5878], [405.3632, 196.3221, 493.3953, 270.7942]]) """ rng = ensure_rng(rng) tlbr = rng.rand(num, 4).astype(np.float32) tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2]) tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3]) br_x = np.maximum(tlbr[:, 0], tlbr[:, 2]) br_y = np.maximum(tlbr[:, 1], tlbr[:, 3]) tlbr[:, 0] = tl_x * scale tlbr[:, 1] = tl_y * scale tlbr[:, 2] = br_x * scale tlbr[:, 3] = br_y * scale boxes = torch.from_numpy(tlbr) return boxes ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/iou_calculators/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .builder import build_iou_calculator from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps __all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps'] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/iou_calculators/builder.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from mmcv.utils import Registry, build_from_cfg IOU_CALCULATORS = Registry('IoU calculator') def build_iou_calculator(cfg, default_args=None): """Builder of IoU calculator.""" return build_from_cfg(cfg, IOU_CALCULATORS, default_args) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/iou_calculators/iou2d_calculator.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from .builder import IOU_CALCULATORS def cast_tensor_type(x, scale=1., dtype=None): if dtype == 'fp16': # scale is for preventing overflows x = (x / scale).half() return x def fp16_clamp(x, min=None, max=None): if not x.is_cuda and x.dtype == torch.float16: # clamp for cpu float16, tensor fp16 has no clamp implementation return x.float().clamp(min, max).half() return x.clamp(min, max) @IOU_CALCULATORS.register_module() class BboxOverlaps2D: """2D Overlaps (e.g. IoUs, GIoUs) Calculator.""" def __init__(self, scale=1., dtype=None): self.scale = scale self.dtype = dtype def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False): """Calculate IoU between 2D bboxes. Args: bboxes1 (Tensor): bboxes have shape (m, 4) in format, or shape (m, 5) in format. bboxes2 (Tensor): bboxes have shape (n, 4) in format, shape (n, 5) in format, or be empty. mode (str): "iou" (intersection over union), "iof" (intersection over foreground), or "giou" (generalized intersection over union). is_aligned (bool, optional): If True, then m and n must be equal. Default False. 
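The two helpers above combine naturally; the following usage sketch (assuming an environment where mmdet and its dependencies import cleanly) draws random boxes and computes a pairwise IoU matrix with BboxOverlaps2D.

import torch
from mmdet.core.bbox.demodata import random_boxes
from mmdet.core.bbox.iou_calculators import BboxOverlaps2D

calculator = BboxOverlaps2D()
boxes1 = random_boxes(num=3, scale=256, rng=0)
boxes2 = random_boxes(num=5, scale=256, rng=1)
ious = calculator(boxes1, boxes2, mode='iou')   # shape (3, 5)
assert ious.shape == (3, 5) and bool((ious >= 0).all())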
Returns: Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,) """ assert bboxes1.size(-1) in [0, 4, 5] assert bboxes2.size(-1) in [0, 4, 5] if bboxes2.size(-1) == 5: bboxes2 = bboxes2[..., :4] if bboxes1.size(-1) == 5: bboxes1 = bboxes1[..., :4] if self.dtype == 'fp16': # change tensor type to save cpu and cuda memory and keep speed bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype) bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype) overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned) if not overlaps.is_cuda and overlaps.dtype == torch.float16: # resume cpu float32 overlaps = overlaps.float() return overlaps return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned) def __repr__(self): """str: a string describing the module""" repr_str = self.__class__.__name__ + f'(' \ f'scale={self.scale}, dtype={self.dtype})' return repr_str def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6): """Calculate overlap between two sets of bboxes. FP16 contributed by https://github.com/open-mmlab/mmdetection/pull/4889 Note: Assume bboxes1 is M x 4 and bboxes2 is N x 4; when mode is 'iou', several intermediate variables are generated while calculating IoU with this function: 1) is_aligned is False area1: M x 1 area2: N x 1 lt: M x N x 2 rb: M x N x 2 wh: M x N x 2 overlap: M x N x 1 union: M x N x 1 ious: M x N x 1 Total memory: S = (9 x N x M + N + M) * 4 bytes. When using FP16, we can reduce: R = (9 x N x M + N + M) * 4 / 2 bytes. R larger than (N + M) * 4 * 2 always holds when N and M >= 1. Obviously, N + M <= N * M < 3 * N * M when N >= 2 and M >= 2, and N + 1 < 3 * N when N or M is 1. Given M = 40 (ground truths) and N = 400000 (three anchor boxes per grid location, FPN, R-CNNs), R = 275 MB per pass. A special case (dense detection) with M = 512 ground truths gives R = 3516 MB = 3.43 GB. With batch size B the reduction is B x R, so CUDA memory frequently runs out otherwise. Experiments on GeForce RTX 2080Ti (11019 MiB): | dtype | M | N | Use | Real | Ideal | |:----:|:----:|:----:|:----:|:----:|:----:| | FP32 | 512 | 400000 | 8020 MiB | -- | -- | | FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB | | FP32 | 40 | 400000 | 1540 MiB | -- | -- | | FP16 | 40 | 400000 | 1264 MiB | 276 MiB | 275 MiB | 2) is_aligned is True area1: N x 1 area2: N x 1 lt: N x 2 rb: N x 2 wh: N x 2 overlap: N x 1 union: N x 1 ious: N x 1 Total memory: S = 11 x N * 4 bytes. When using FP16, we can reduce: R = 11 x N * 4 / 2 bytes. The same holds for 'giou' (which uses more memory than 'iou'). Time-wise, FP16 is generally faster than FP32. When gpu_assign_thr is not -1, assignment takes more time on the CPU but does not reduce memory; there, FP16 halves the memory while keeping the speed. If ``is_aligned`` is ``False``, then calculate the overlaps between each bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned pair of bboxes1 and bboxes2. Args: bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty. bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty. B indicates the batch dim, in shape (B1, B2, ..., Bn). If ``is_aligned`` is ``True``, then m and n must be equal. mode (str): "iou" (intersection over union), "iof" (intersection over foreground) or "giou" (generalized intersection over union). Default "iou". is_aligned (bool, optional): If True, then m and n must be equal. Default False. eps (float, optional): A value added to the denominator for numerical stability. Default 1e-6.
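The FP16 figures quoted in the note above can be sanity-checked with a few lines of arithmetic; this is just the S = (9 * N * M + N + M) * 4 bytes formula halved and converted to MiB.

def fp16_saving_mib(m, n):
    # Savings from FP16 are half of S for the is_aligned=False case.
    s_bytes = (9 * n * m + n + m) * 4
    return s_bytes / 2 / 2**20

print(round(fp16_saving_mib(40, 400000)))   # ~275 MiB
print(round(fp16_saving_mib(512, 400000)))  # ~3516 MiB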
Returns: Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,) Example: >>> bboxes1 = torch.FloatTensor([ >>> [0, 0, 10, 10], >>> [10, 10, 20, 20], >>> [32, 32, 38, 42], >>> ]) >>> bboxes2 = torch.FloatTensor([ >>> [0, 0, 10, 20], >>> [0, 10, 10, 19], >>> [10, 10, 20, 20], >>> ]) >>> overlaps = bbox_overlaps(bboxes1, bboxes2) >>> assert overlaps.shape == (3, 3) >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True) >>> assert overlaps.shape == (3, ) Example: >>> empty = torch.empty(0, 4) >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]]) >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1) >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0) >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0) """ assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}' # Either the boxes are empty or the length of boxes' last dimension is 4 assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0) assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0) # Batch dim must be the same # Batch dim: (B1, B2, ... Bn) assert bboxes1.shape[:-2] == bboxes2.shape[:-2] batch_shape = bboxes1.shape[:-2] rows = bboxes1.size(-2) cols = bboxes2.size(-2) if is_aligned: assert rows == cols if rows * cols == 0: if is_aligned: return bboxes1.new(batch_shape + (rows, )) else: return bboxes1.new(batch_shape + (rows, cols)) area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * ( bboxes1[..., 3] - bboxes1[..., 1]) area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * ( bboxes2[..., 3] - bboxes2[..., 1]) if is_aligned: lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2] rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2] wh = fp16_clamp(rb - lt, min=0) overlap = wh[..., 0] * wh[..., 1] if mode in ['iou', 'giou']: union = area1 + area2 - overlap else: union = area1 if mode == 'giou': enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2]) enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:]) else: lt = torch.max(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2]) # [B, rows, cols, 2] rb = torch.min(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:]) # [B, rows, cols, 2] wh = fp16_clamp(rb - lt, min=0) overlap = wh[..., 0] * wh[..., 1] if mode in ['iou', 'giou']: union = area1[..., None] + area2[..., None, :] - overlap else: union = area1[..., None] if mode == 'giou': enclosed_lt = torch.min(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2]) enclosed_rb = torch.max(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:]) eps = union.new_tensor([eps]) union = torch.max(union, eps) ious = overlap / union if mode in ['iou', 'iof']: return ious # calculate gious enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0) enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1] enclose_area = torch.max(enclose_area, eps) gious = ious - (enclose_area - union) / enclose_area return gious ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/match_costs/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
from .builder import build_match_cost from .match_cost import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost, DiceCost, FocalLossCost, IoUCost) __all__ = [ 'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost', 'FocalLossCost', 'DiceCost', 'CrossEntropyLossCost' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/match_costs/builder.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from mmcv.utils import Registry, build_from_cfg MATCH_COST = Registry('Match Cost') def build_match_cost(cfg, default_args=None): """Builder of match cost.""" return build_from_cfg(cfg, MATCH_COST, default_args) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/match_costs/match_cost.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn.functional as F from mmdet.core.bbox.iou_calculators import bbox_overlaps from mmdet.core.bbox.transforms import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh from .builder import MATCH_COST @MATCH_COST.register_module() class BBoxL1Cost: """BBoxL1Cost. Args: weight (int | float, optional): loss_weight box_format (str, optional): 'xyxy' for DETR, 'xywh' for Sparse_RCNN Examples: >>> from mmdet.core.bbox.match_costs.match_cost import BBoxL1Cost >>> import torch >>> self = BBoxL1Cost() >>> bbox_pred = torch.rand(1, 4) >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]]) >>> factor = torch.tensor([10, 8, 10, 8]) >>> self(bbox_pred, gt_bboxes) tensor([[1.6172, 1.6422]]) """ def __init__(self, weight=1., box_format='xyxy'): self.weight = weight assert box_format in ['xyxy', 'xywh'] self.box_format = box_format def __call__(self, bbox_pred, gt_bboxes): """ Args: bbox_pred (Tensor): Predicted boxes with normalized coordinates (cx, cy, w, h), which are all in range [0, 1]. Shape (num_query, 4). gt_bboxes (Tensor): Ground truth boxes with normalized coordinates (x1, y1, x2, y2). Shape (num_gt, 4). Returns: torch.Tensor: bbox_cost value with weight """ if self.box_format == 'xywh': gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes) elif self.box_format == 'xyxy': bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred) bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1) return bbox_cost * self.weight @MATCH_COST.register_module() class FocalLossCost: """FocalLossCost. Args: weight (int | float, optional): loss_weight alpha (int | float, optional): focal_loss alpha gamma (int | float, optional): focal_loss gamma eps (float, optional): default 1e-12 binary_input (bool, optional): Whether the input is binary, default False. Examples: >>> from mmdet.core.bbox.match_costs.match_cost import FocalLossCost >>> import torch >>> self = FocalLossCost() >>> cls_pred = torch.rand(4, 3) >>> gt_labels = torch.tensor([0, 1, 2]) >>> factor = torch.tensor([10, 8, 10, 8]) >>> self(cls_pred, gt_labels) tensor([[-0.3236, -0.3364, -0.2699], [-0.3439, -0.3209, -0.4807], [-0.4099, -0.3795, -0.2929], [-0.1950, -0.1207, -0.2626]]) """ def __init__(self, weight=1., alpha=0.25, gamma=2, eps=1e-12, binary_input=False): self.weight = weight self.alpha = alpha self.gamma = gamma self.eps = eps self.binary_input = binary_input def _focal_loss_cost(self, cls_pred, gt_labels): """ Args: cls_pred (Tensor): Predicted classification logits, shape (num_query, num_class). gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
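A compact sketch of the focal matching cost computed by _focal_loss_cost just below: sigmoid the logits, build the positive and negative focal terms, and take their difference at each gt class. The tensors are arbitrary demo values.

import torch

alpha, gamma, eps = 0.25, 2.0, 1e-12
cls_pred = torch.tensor([[2.0, -1.0], [-0.5, 1.5]])   # (num_query, num_class)
gt_labels = torch.tensor([0, 1])                       # (num_gt,)
p = cls_pred.sigmoid()
neg = -(1 - p + eps).log() * (1 - alpha) * p.pow(gamma)
pos = -(p + eps).log() * alpha * (1 - p).pow(gamma)
cost = pos[:, gt_labels] - neg[:, gt_labels]           # (num_query, num_gt)
print(cost)  # lower cost where the query is confident in the gt class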
Returns: torch.Tensor: cls_cost value with weight """ cls_pred = cls_pred.sigmoid() neg_cost = -(1 - cls_pred + self.eps).log() * ( 1 - self.alpha) * cls_pred.pow(self.gamma) pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( 1 - cls_pred).pow(self.gamma) cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels] return cls_cost * self.weight def _mask_focal_loss_cost(self, cls_pred, gt_labels): """ Args: cls_pred (Tensor): Predicted classfication logits in shape (num_query, d1, ..., dn), dtype=torch.float32. gt_labels (Tensor): Ground truth in shape (num_gt, d1, ..., dn), dtype=torch.long. Labels should be binary. Returns: Tensor: Focal cost matrix with weight in shape\ (num_query, num_gt). """ cls_pred = cls_pred.flatten(1) gt_labels = gt_labels.flatten(1).float() n = cls_pred.shape[1] cls_pred = cls_pred.sigmoid() neg_cost = -(1 - cls_pred + self.eps).log() * ( 1 - self.alpha) * cls_pred.pow(self.gamma) pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( 1 - cls_pred).pow(self.gamma) cls_cost = torch.einsum('nc,mc->nm', pos_cost, gt_labels) + \ torch.einsum('nc,mc->nm', neg_cost, (1 - gt_labels)) return cls_cost / n * self.weight def __call__(self, cls_pred, gt_labels): """ Args: cls_pred (Tensor): Predicted classfication logits. gt_labels (Tensor)): Labels. Returns: Tensor: Focal cost matrix with weight in shape\ (num_query, num_gt). """ if self.binary_input: return self._mask_focal_loss_cost(cls_pred, gt_labels) else: return self._focal_loss_cost(cls_pred, gt_labels) @MATCH_COST.register_module() class ClassificationCost: """ClsSoftmaxCost. Args: weight (int | float, optional): loss_weight Examples: >>> from mmdet.core.bbox.match_costs.match_cost import \ ... ClassificationCost >>> import torch >>> self = ClassificationCost() >>> cls_pred = torch.rand(4, 3) >>> gt_labels = torch.tensor([0, 1, 2]) >>> factor = torch.tensor([10, 8, 10, 8]) >>> self(cls_pred, gt_labels) tensor([[-0.3430, -0.3525, -0.3045], [-0.3077, -0.2931, -0.3992], [-0.3664, -0.3455, -0.2881], [-0.3343, -0.2701, -0.3956]]) """ def __init__(self, weight=1.): self.weight = weight def __call__(self, cls_pred, gt_labels): """ Args: cls_pred (Tensor): Predicted classification logits, shape (num_query, num_class). gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). Returns: torch.Tensor: cls_cost value with weight """ # Following the official DETR repo, contrary to the loss that # NLL is used, we approximate it in 1 - cls_score[gt_label]. # The 1 is a constant that doesn't change the matching, # so it can be omitted. cls_score = cls_pred.softmax(-1) cls_cost = -cls_score[:, gt_labels] return cls_cost * self.weight @MATCH_COST.register_module() class IoUCost: """IoUCost. Args: iou_mode (str, optional): iou mode such as 'iou' | 'giou' weight (int | float, optional): loss weight Examples: >>> from mmdet.core.bbox.match_costs.match_cost import IoUCost >>> import torch >>> self = IoUCost() >>> bboxes = torch.FloatTensor([[1,1, 2, 2], [2, 2, 3, 4]]) >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]]) >>> self(bboxes, gt_bboxes) tensor([[-0.1250, 0.1667], [ 0.1667, -0.5000]]) """ def __init__(self, iou_mode='giou', weight=1.): self.weight = weight self.iou_mode = iou_mode def __call__(self, bboxes, gt_bboxes): """ Args: bboxes (Tensor): Predicted boxes with unnormalized coordinates (x1, y1, x2, y2). Shape (num_query, 4). gt_bboxes (Tensor): Ground truth boxes with unnormalized coordinates (x1, y1, x2, y2). Shape (num_gt, 4). 
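The comment above notes that the constant 1 in "1 - cls_score[gt_label]" can be dropped because a constant offset never changes the optimal assignment. A quick demonstration with SciPy's Hungarian solver (SciPy is used here purely for illustration; the matcher in mmdet lives elsewhere):

import numpy as np
from scipy.optimize import linear_sum_assignment

cost = np.array([[0.2, 0.9], [0.8, 0.1]])
rows1, cols1 = linear_sum_assignment(cost)
rows2, cols2 = linear_sum_assignment(cost + 1.0)
assert (cols1 == cols2).all()   # same matching with or without the shift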
Returns: torch.Tensor: iou_cost value with weight """ # overlaps: [num_bboxes, num_gt] overlaps = bbox_overlaps( bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False) # The 1 is a constant that doesn't change the matching, so omitted. iou_cost = -overlaps return iou_cost * self.weight @MATCH_COST.register_module() class DiceCost: """Cost of mask assignments based on dice losses. Args: weight (int | float, optional): loss_weight. Defaults to 1. pred_act (bool, optional): Whether to apply sigmoid to mask_pred. Defaults to False. eps (float, optional): Defaults to 1e-3. naive_dice (bool, optional): If True, use the naive dice loss in which the power of the number in the denominator is the first power. If False, use the second power that is adopted by K-Net and SOLO. Defaults to True. """ def __init__(self, weight=1., pred_act=False, eps=1e-3, naive_dice=True): self.weight = weight self.pred_act = pred_act self.eps = eps self.naive_dice = naive_dice def binary_mask_dice_loss(self, mask_preds, gt_masks): """ Args: mask_preds (Tensor): Mask prediction in shape (num_query, *). gt_masks (Tensor): Ground truth in shape (num_gt, *) store 0 or 1, 0 for negative class and 1 for positive class. Returns: Tensor: Dice cost matrix in shape (num_query, num_gt). """ mask_preds = mask_preds.flatten(1) gt_masks = gt_masks.flatten(1).float() numerator = 2 * torch.einsum('nc,mc->nm', mask_preds, gt_masks) if self.naive_dice: denominator = mask_preds.sum(-1)[:, None] + \ gt_masks.sum(-1)[None, :] else: denominator = mask_preds.pow(2).sum(1)[:, None] + \ gt_masks.pow(2).sum(1)[None, :] loss = 1 - (numerator + self.eps) / (denominator + self.eps) return loss def __call__(self, mask_preds, gt_masks): """ Args: mask_preds (Tensor): Mask prediction logits in shape (num_query, *) gt_masks (Tensor): Ground truth in shape (num_gt, *) Returns: Tensor: Dice cost matrix with weight in shape (num_query, num_gt). """ if self.pred_act: mask_preds = mask_preds.sigmoid() dice_cost = self.binary_mask_dice_loss(mask_preds, gt_masks) return dice_cost * self.weight @MATCH_COST.register_module() class CrossEntropyLossCost: """CrossEntropyLossCost. Args: weight (int | float, optional): loss weight. Defaults to 1. use_sigmoid (bool, optional): Whether the prediction uses sigmoid or softmax. Defaults to True. Examples: >>> from mmdet.core.bbox.match_costs import CrossEntropyLossCost >>> import torch >>> bce = CrossEntropyLossCost(use_sigmoid=True) >>> cls_pred = torch.tensor([[7.6, 1.2], [-1.3, 10]]) >>> gt_labels = torch.tensor([[1, 1], [1, 0]]) >>> print(bce(cls_pred, gt_labels)) """ def __init__(self, weight=1., use_sigmoid=True): assert use_sigmoid, 'use_sigmoid = False is not supported yet.' self.weight = weight self.use_sigmoid = use_sigmoid def _binary_cross_entropy(self, cls_pred, gt_labels): """ Args: cls_pred (Tensor): The prediction with shape (num_query, 1, *) or (num_query, *). gt_labels (Tensor): The learning label of prediction with shape (num_gt, *). Returns: Tensor: Cross entropy cost matrix in shape (num_query, num_gt).
""" cls_pred = cls_pred.flatten(1).float() gt_labels = gt_labels.flatten(1).float() n = cls_pred.shape[1] pos = F.binary_cross_entropy_with_logits( cls_pred, torch.ones_like(cls_pred), reduction='none') neg = F.binary_cross_entropy_with_logits( cls_pred, torch.zeros_like(cls_pred), reduction='none') cls_cost = torch.einsum('nc,mc->nm', pos, gt_labels) + \ torch.einsum('nc,mc->nm', neg, 1 - gt_labels) cls_cost = cls_cost / n return cls_cost def __call__(self, cls_pred, gt_labels): """ Args: cls_pred (Tensor): Predicted classification logits. gt_labels (Tensor): Labels. Returns: Tensor: Cross entropy cost matrix with weight in shape (num_query, num_gt). """ if self.use_sigmoid: cls_cost = self._binary_cross_entropy(cls_pred, gt_labels) else: raise NotImplementedError return cls_cost * self.weight ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .base_sampler import BaseSampler from .combined_sampler import CombinedSampler from .instance_balanced_pos_sampler import InstanceBalancedPosSampler from .iou_balanced_neg_sampler import IoUBalancedNegSampler from .mask_pseudo_sampler import MaskPseudoSampler from .mask_sampling_result import MaskSamplingResult from .ohem_sampler import OHEMSampler from .pseudo_sampler import PseudoSampler from .random_sampler import RandomSampler from .sampling_result import SamplingResult from .score_hlr_sampler import ScoreHLRSampler __all__ = [ 'BaseSampler', 'PseudoSampler', 'RandomSampler', 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'MaskPseudoSampler', 'MaskSamplingResult' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/base_sampler.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod import torch from .sampling_result import SamplingResult class BaseSampler(metaclass=ABCMeta): """Base class of samplers.""" def __init__(self, num, pos_fraction, neg_pos_ub=-1, add_gt_as_proposals=True, **kwargs): self.num = num self.pos_fraction = pos_fraction self.neg_pos_ub = neg_pos_ub self.add_gt_as_proposals = add_gt_as_proposals self.pos_sampler = self self.neg_sampler = self @abstractmethod def _sample_pos(self, assign_result, num_expected, **kwargs): """Sample positive samples.""" pass @abstractmethod def _sample_neg(self, assign_result, num_expected, **kwargs): """Sample negative samples.""" pass def sample(self, assign_result, bboxes, gt_bboxes, gt_labels=None, **kwargs): """Sample positive and negative bboxes. This is a simple implementation of bbox sampling given candidates, assigning results and ground truth bboxes. Args: assign_result (:obj:`AssignResult`): Bbox assigning results. bboxes (Tensor): Boxes to be sampled from. gt_bboxes (Tensor): Ground truth bboxes. gt_labels (Tensor, optional): Class labels of ground truth bboxes. Returns: :obj:`SamplingResult`: Sampling result. 
Example: >>> from mmdet.core.bbox import RandomSampler >>> from mmdet.core.bbox import AssignResult >>> from mmdet.core.bbox.demodata import ensure_rng, random_boxes >>> rng = ensure_rng(None) >>> assign_result = AssignResult.random(rng=rng) >>> bboxes = random_boxes(assign_result.num_preds, rng=rng) >>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng) >>> gt_labels = None >>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1, >>> add_gt_as_proposals=False) >>> self = self.sample(assign_result, bboxes, gt_bboxes, gt_labels) """ if len(bboxes.shape) < 2: bboxes = bboxes[None, :] bboxes = bboxes[:, :4] gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8) if self.add_gt_as_proposals and len(gt_bboxes) > 0: if gt_labels is None: raise ValueError( 'gt_labels must be given when add_gt_as_proposals is True') bboxes = torch.cat([gt_bboxes, bboxes], dim=0) assign_result.add_gt_(gt_labels) gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) gt_flags = torch.cat([gt_ones, gt_flags]) num_expected_pos = int(self.num * self.pos_fraction) pos_inds = self.pos_sampler._sample_pos( assign_result, num_expected_pos, bboxes=bboxes, **kwargs) # We found that sampled indices have duplicated items occasionally. # (may be a bug of PyTorch) pos_inds = pos_inds.unique() num_sampled_pos = pos_inds.numel() num_expected_neg = self.num - num_sampled_pos if self.neg_pos_ub >= 0: _pos = max(1, num_sampled_pos) neg_upper_bound = int(self.neg_pos_ub * _pos) if num_expected_neg > neg_upper_bound: num_expected_neg = neg_upper_bound neg_inds = self.neg_sampler._sample_neg( assign_result, num_expected_neg, bboxes=bboxes, **kwargs) neg_inds = neg_inds.unique() sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags) return sampling_result ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/combined_sampler.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import BBOX_SAMPLERS, build_sampler from .base_sampler import BaseSampler @BBOX_SAMPLERS.register_module() class CombinedSampler(BaseSampler): """A sampler that combines positive sampler and negative sampler.""" def __init__(self, pos_sampler, neg_sampler, **kwargs): super(CombinedSampler, self).__init__(**kwargs) self.pos_sampler = build_sampler(pos_sampler, **kwargs) self.neg_sampler = build_sampler(neg_sampler, **kwargs) def _sample_pos(self, **kwargs): """Sample positive samples.""" raise NotImplementedError def _sample_neg(self, **kwargs): """Sample negative samples.""" raise NotImplementedError ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from ..builder import BBOX_SAMPLERS from .random_sampler import RandomSampler @BBOX_SAMPLERS.register_module() class InstanceBalancedPosSampler(RandomSampler): """Instance balanced sampler that samples equal number of positive samples for each instance.""" def _sample_pos(self, assign_result, num_expected, **kwargs): """Sample positive boxes. Args: assign_result (:obj:`AssignResult`): The assigned results of boxes. num_expected (int): The number of expected positive samples Returns: Tensor or ndarray: sampled indices. 
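The quota arithmetic inside BaseSampler.sample above is easy to trace by hand; with the hypothetical numbers below, a shortage of positives enlarges the negative quota until neg_pos_ub caps it.

num, pos_fraction, neg_pos_ub = 32, 0.5, 3
num_expected_pos = int(num * pos_fraction)                   # 16
num_sampled_pos = 5                    # suppose only 5 positives exist
num_expected_neg = num - num_sampled_pos                     # 27
neg_upper_bound = int(neg_pos_ub * max(1, num_sampled_pos))  # 15
num_expected_neg = min(num_expected_neg, neg_upper_bound)    # capped at 15
print(num_expected_pos, num_expected_neg)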
""" pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) if pos_inds.numel() != 0: pos_inds = pos_inds.squeeze(1) if pos_inds.numel() <= num_expected: return pos_inds else: unique_gt_inds = assign_result.gt_inds[pos_inds].unique() num_gts = len(unique_gt_inds) num_per_gt = int(round(num_expected / float(num_gts)) + 1) sampled_inds = [] for i in unique_gt_inds: inds = torch.nonzero( assign_result.gt_inds == i.item(), as_tuple=False) if inds.numel() != 0: inds = inds.squeeze(1) else: continue if len(inds) > num_per_gt: inds = self.random_choice(inds, num_per_gt) sampled_inds.append(inds) sampled_inds = torch.cat(sampled_inds) if len(sampled_inds) < num_expected: num_extra = num_expected - len(sampled_inds) extra_inds = np.array( list(set(pos_inds.cpu()) - set(sampled_inds.cpu()))) if len(extra_inds) > num_extra: extra_inds = self.random_choice(extra_inds, num_extra) extra_inds = torch.from_numpy(extra_inds).to( assign_result.gt_inds.device).long() sampled_inds = torch.cat([sampled_inds, extra_inds]) elif len(sampled_inds) > num_expected: sampled_inds = self.random_choice(sampled_inds, num_expected) return sampled_inds ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from ..builder import BBOX_SAMPLERS from .random_sampler import RandomSampler @BBOX_SAMPLERS.register_module() class IoUBalancedNegSampler(RandomSampler): """IoU Balanced Sampling. arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019) Sampling proposals according to their IoU. `floor_fraction` of needed RoIs are sampled from proposals whose IoU are lower than `floor_thr` randomly. The others are sampled from proposals whose IoU are higher than `floor_thr`. These proposals are sampled from some bins evenly, which are split by `num_bins` via IoU evenly. Args: num (int): number of proposals. pos_fraction (float): fraction of positive proposals. floor_thr (float): threshold (minimum) IoU for IoU balanced sampling, set to -1 if all using IoU balanced sampling. floor_fraction (float): sampling fraction of proposals under floor_thr. num_bins (int): number of bins in IoU balanced sampling. """ def __init__(self, num, pos_fraction, floor_thr=-1, floor_fraction=0, num_bins=3, **kwargs): super(IoUBalancedNegSampler, self).__init__(num, pos_fraction, **kwargs) assert floor_thr >= 0 or floor_thr == -1 assert 0 <= floor_fraction <= 1 assert num_bins >= 1 self.floor_thr = floor_thr self.floor_fraction = floor_fraction self.num_bins = num_bins def sample_via_interval(self, max_overlaps, full_set, num_expected): """Sample according to the iou interval. Args: max_overlaps (torch.Tensor): IoU between bounding boxes and ground truth boxes. 
full_set (set(int)): A full set of indices of boxes. num_expected (int): Number of expected samples. Returns: np.ndarray: Indices of samples. """ max_iou = max_overlaps.max() iou_interval = (max_iou - self.floor_thr) / self.num_bins per_num_expected = int(num_expected / self.num_bins) sampled_inds = [] for i in range(self.num_bins): start_iou = self.floor_thr + i * iou_interval end_iou = self.floor_thr + (i + 1) * iou_interval tmp_set = set( np.where( np.logical_and(max_overlaps >= start_iou, max_overlaps < end_iou))[0]) tmp_inds = list(tmp_set & full_set) if len(tmp_inds) > per_num_expected: tmp_sampled_set = self.random_choice(tmp_inds, per_num_expected) else: tmp_sampled_set = np.array(tmp_inds, dtype=int) sampled_inds.append(tmp_sampled_set) sampled_inds = np.concatenate(sampled_inds) if len(sampled_inds) < num_expected: num_extra = num_expected - len(sampled_inds) extra_inds = np.array(list(full_set - set(sampled_inds))) if len(extra_inds) > num_extra: extra_inds = self.random_choice(extra_inds, num_extra) sampled_inds = np.concatenate([sampled_inds, extra_inds]) return sampled_inds def _sample_neg(self, assign_result, num_expected, **kwargs): """Sample negative boxes. Args: assign_result (:obj:`AssignResult`): The assigned results of boxes. num_expected (int): The number of expected negative samples. Returns: Tensor or ndarray: sampled indices. """ neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) if neg_inds.numel() != 0: neg_inds = neg_inds.squeeze(1) if len(neg_inds) <= num_expected: return neg_inds else: max_overlaps = assign_result.max_overlaps.cpu().numpy() # balance sampling for negative samples neg_set = set(neg_inds.cpu().numpy()) if self.floor_thr > 0: floor_set = set( np.where( np.logical_and(max_overlaps >= 0, max_overlaps < self.floor_thr))[0]) iou_sampling_set = set( np.where(max_overlaps >= self.floor_thr)[0]) elif self.floor_thr == 0: floor_set = set(np.where(max_overlaps == 0)[0]) iou_sampling_set = set( np.where(max_overlaps > self.floor_thr)[0]) else: floor_set = set() iou_sampling_set = set( np.where(max_overlaps > self.floor_thr)[0]) # for sampling interval calculation self.floor_thr = 0 floor_neg_inds = list(floor_set & neg_set) iou_sampling_neg_inds = list(iou_sampling_set & neg_set) num_expected_iou_sampling = int(num_expected * (1 - self.floor_fraction)) if len(iou_sampling_neg_inds) > num_expected_iou_sampling: if self.num_bins >= 2: iou_sampled_inds = self.sample_via_interval( max_overlaps, set(iou_sampling_neg_inds), num_expected_iou_sampling) else: iou_sampled_inds = self.random_choice( iou_sampling_neg_inds, num_expected_iou_sampling) else: iou_sampled_inds = np.array( iou_sampling_neg_inds, dtype=int) num_expected_floor = num_expected - len(iou_sampled_inds) if len(floor_neg_inds) > num_expected_floor: sampled_floor_inds = self.random_choice( floor_neg_inds, num_expected_floor) else: sampled_floor_inds = np.array(floor_neg_inds, dtype=int) sampled_inds = np.concatenate( (sampled_floor_inds, iou_sampled_inds)) if len(sampled_inds) < num_expected: num_extra = num_expected - len(sampled_inds) extra_inds = np.array(list(neg_set - set(sampled_inds))) if len(extra_inds) > num_extra: extra_inds = self.random_choice(extra_inds, num_extra) sampled_inds = np.concatenate((sampled_inds, extra_inds)) sampled_inds = torch.from_numpy(sampled_inds).long().to( assign_result.gt_inds.device) return sampled_inds ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/mask_pseudo_sampler.py
================================================ # Copyright (c) OpenMMLab. All rights reserved. """copy from https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.""" import torch from mmdet.core.bbox.builder import BBOX_SAMPLERS from .base_sampler import BaseSampler from .mask_sampling_result import MaskSamplingResult @BBOX_SAMPLERS.register_module() class MaskPseudoSampler(BaseSampler): """A pseudo sampler that does not do sampling actually.""" def __init__(self, **kwargs): pass def _sample_pos(self, **kwargs): """Sample positive samples.""" raise NotImplementedError def _sample_neg(self, **kwargs): """Sample negative samples.""" raise NotImplementedError def sample(self, assign_result, masks, gt_masks, **kwargs): """Directly returns the positive and negative indices of samples. Args: assign_result (:obj:`AssignResult`): Assigned results masks (torch.Tensor): Bounding boxes gt_masks (torch.Tensor): Ground truth boxes Returns: :obj:`SamplingResult`: sampler results """ pos_inds = torch.nonzero( assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() neg_inds = torch.nonzero( assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() gt_flags = masks.new_zeros(masks.shape[0], dtype=torch.uint8) sampling_result = MaskSamplingResult(pos_inds, neg_inds, masks, gt_masks, assign_result, gt_flags) return sampling_result ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/mask_sampling_result.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. """copy from https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.""" import torch from .sampling_result import SamplingResult class MaskSamplingResult(SamplingResult): """Mask sampling result.""" def __init__(self, pos_inds, neg_inds, masks, gt_masks, assign_result, gt_flags): self.pos_inds = pos_inds self.neg_inds = neg_inds self.pos_masks = masks[pos_inds] self.neg_masks = masks[neg_inds] self.pos_is_gt = gt_flags[pos_inds] self.num_gts = gt_masks.shape[0] self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 if gt_masks.numel() == 0: # hack for index error case assert self.pos_assigned_gt_inds.numel() == 0 self.pos_gt_masks = torch.empty_like(gt_masks) else: self.pos_gt_masks = gt_masks[self.pos_assigned_gt_inds, :] if assign_result.labels is not None: self.pos_gt_labels = assign_result.labels[pos_inds] else: self.pos_gt_labels = None @property def masks(self): """torch.Tensor: concatenated positive and negative boxes""" return torch.cat([self.pos_masks, self.neg_masks]) def __nice__(self): data = self.info.copy() data['pos_masks'] = data.pop('pos_masks').shape data['neg_masks'] = data.pop('neg_masks').shape parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())] body = ' ' + ',\n '.join(parts) return '{\n' + body + '\n}' @property def info(self): """Returns a dictionary of info about the object.""" return { 'pos_inds': self.pos_inds, 'neg_inds': self.neg_inds, 'pos_masks': self.pos_masks, 'neg_masks': self.neg_masks, 'pos_is_gt': self.pos_is_gt, 'num_gts': self.num_gts, 'pos_assigned_gt_inds': self.pos_assigned_gt_inds, } ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/ohem_sampler.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
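# Editor's note (commentary added for this dump, not upstream code):
# OHEM replaces uniform random sampling with loss-based selection. The
# `hard_mining` helper below scores each candidate RoI by forwarding it
# through the bbox head, computes the per-RoI classification loss with
# `reduction_override='none'`, and keeps the `num_expected` candidates
# with the largest loss via `loss.topk(num_expected)`.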
import torch from ..builder import BBOX_SAMPLERS from ..transforms import bbox2roi from .base_sampler import BaseSampler @BBOX_SAMPLERS.register_module() class OHEMSampler(BaseSampler): r"""Online Hard Example Mining Sampler described in `Training Region-based Object Detectors with Online Hard Example Mining <https://arxiv.org/abs/1604.03540>`_. """ def __init__(self, num, pos_fraction, context, neg_pos_ub=-1, add_gt_as_proposals=True, loss_key='loss_cls', **kwargs): super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals) self.context = context if not hasattr(self.context, 'num_stages'): self.bbox_head = self.context.bbox_head else: self.bbox_head = self.context.bbox_head[self.context.current_stage] self.loss_key = loss_key def hard_mining(self, inds, num_expected, bboxes, labels, feats): with torch.no_grad(): rois = bbox2roi([bboxes]) if not hasattr(self.context, 'num_stages'): bbox_results = self.context._bbox_forward(feats, rois) else: bbox_results = self.context._bbox_forward( self.context.current_stage, feats, rois) cls_score = bbox_results['cls_score'] loss = self.bbox_head.loss( cls_score=cls_score, bbox_pred=None, rois=rois, labels=labels, label_weights=cls_score.new_ones(cls_score.size(0)), bbox_targets=None, bbox_weights=None, reduction_override='none')[self.loss_key] _, topk_loss_inds = loss.topk(num_expected) return inds[topk_loss_inds] def _sample_pos(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs): """Sample positive boxes. Args: assign_result (:obj:`AssignResult`): Assigned results num_expected (int): Number of expected positive samples bboxes (torch.Tensor, optional): Boxes. Defaults to None. feats (list[torch.Tensor], optional): Multi-level features. Defaults to None. Returns: torch.Tensor: Indices of positive samples. """ # Sample some hard positive samples pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) if pos_inds.numel() != 0: pos_inds = pos_inds.squeeze(1) if pos_inds.numel() <= num_expected: return pos_inds else: return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds], assign_result.labels[pos_inds], feats) def _sample_neg(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs): """Sample negative boxes. Args: assign_result (:obj:`AssignResult`): Assigned results num_expected (int): Number of expected negative samples bboxes (torch.Tensor, optional): Boxes. Defaults to None. feats (list[torch.Tensor], optional): Multi-level features. Defaults to None. Returns: torch.Tensor: Indices of negative samples. """ # Sample some hard negative samples neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) if neg_inds.numel() != 0: neg_inds = neg_inds.squeeze(1) if len(neg_inds) <= num_expected: return neg_inds else: neg_labels = assign_result.labels.new_empty( neg_inds.size(0)).fill_(self.bbox_head.num_classes) return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds], neg_labels, feats) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/pseudo_sampler.py ================================================ # Copyright (c) OpenMMLab. All rights reserved.
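# Editor's note (commentary added for this dump, not upstream code):
# PseudoSampler keeps every assigned candidate rather than subsampling:
# positives are simply the indices with `assign_result.gt_inds > 0` and
# negatives those with `gt_inds == 0`. It is the usual choice for dense
# one-stage heads, where no pos/neg ratio needs to be enforced.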
import torch from ..builder import BBOX_SAMPLERS from .base_sampler import BaseSampler from .sampling_result import SamplingResult @BBOX_SAMPLERS.register_module() class PseudoSampler(BaseSampler): """A pseudo sampler that does not do sampling actually.""" def __init__(self, **kwargs): pass def _sample_pos(self, **kwargs): """Sample positive samples.""" raise NotImplementedError def _sample_neg(self, **kwargs): """Sample negative samples.""" raise NotImplementedError def sample(self, assign_result, bboxes, gt_bboxes, *args, **kwargs): """Directly returns the positive and negative indices of samples. Args: assign_result (:obj:`AssignResult`): Assigned results bboxes (torch.Tensor): Bounding boxes gt_bboxes (torch.Tensor): Ground truth boxes Returns: :obj:`SamplingResult`: sampler results """ pos_inds = torch.nonzero( assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() neg_inds = torch.nonzero( assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8) sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags) return sampling_result ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/random_sampler.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_SAMPLERS from .base_sampler import BaseSampler @BBOX_SAMPLERS.register_module() class RandomSampler(BaseSampler): """Random sampler. Args: num (int): Number of samples pos_fraction (float): Fraction of positive samples neg_pos_ub (int, optional): Upper bound number of negative and positive samples. Defaults to -1. add_gt_as_proposals (bool, optional): Whether to add ground truth boxes as proposals. Defaults to True. """ def __init__(self, num, pos_fraction, neg_pos_ub=-1, add_gt_as_proposals=True, **kwargs): from mmdet.core.bbox import demodata super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals) self.rng = demodata.ensure_rng(kwargs.get('rng', None)) def random_choice(self, gallery, num): """Random select some elements from the gallery. If `gallery` is a Tensor, the returned indices will be a Tensor; If `gallery` is a ndarray or list, the returned indices will be a ndarray. Args: gallery (Tensor | ndarray | list): indices pool. num (int): expected sample num. Returns: Tensor or ndarray: sampled indices. """ assert len(gallery) >= num is_tensor = isinstance(gallery, torch.Tensor) if not is_tensor: if torch.cuda.is_available(): device = torch.cuda.current_device() else: device = 'cpu' gallery = torch.tensor(gallery, dtype=torch.long, device=device) # This is a temporary fix. We can revert the following code # when PyTorch fixes the abnormal return of torch.randperm. 
# See: https://github.com/open-mmlab/mmdetection/pull/5014 perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device) rand_inds = gallery[perm] if not is_tensor: rand_inds = rand_inds.cpu().numpy() return rand_inds def _sample_pos(self, assign_result, num_expected, **kwargs): """Randomly sample some positive samples.""" pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) if pos_inds.numel() != 0: pos_inds = pos_inds.squeeze(1) if pos_inds.numel() <= num_expected: return pos_inds else: return self.random_choice(pos_inds, num_expected) def _sample_neg(self, assign_result, num_expected, **kwargs): """Randomly sample some negative samples.""" neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) if neg_inds.numel() != 0: neg_inds = neg_inds.squeeze(1) if len(neg_inds) <= num_expected: return neg_inds else: return self.random_choice(neg_inds, num_expected) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/sampling_result.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.utils import util_mixins class SamplingResult(util_mixins.NiceRepr): """Bbox sampling result. Example: >>> # xdoctest: +IGNORE_WANT >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA >>> self = SamplingResult.random(rng=10) >>> print(f'self = {self}') self = """ def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags): self.pos_inds = pos_inds self.neg_inds = neg_inds self.pos_bboxes = bboxes[pos_inds] self.neg_bboxes = bboxes[neg_inds] self.pos_is_gt = gt_flags[pos_inds] self.num_gts = gt_bboxes.shape[0] self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 if gt_bboxes.numel() == 0: # hack for index error case assert self.pos_assigned_gt_inds.numel() == 0 self.pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4) else: if len(gt_bboxes.shape) < 2: gt_bboxes = gt_bboxes.view(-1, 4) self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds.long(), :] if assign_result.labels is not None: self.pos_gt_labels = assign_result.labels[pos_inds] else: self.pos_gt_labels = None @property def bboxes(self): """torch.Tensor: concatenated positive and negative boxes""" return torch.cat([self.pos_bboxes, self.neg_bboxes]) def to(self, device): """Change the device of the data inplace. Example: >>> self = SamplingResult.random() >>> print(f'self = {self.to(None)}') >>> # xdoctest: +REQUIRES(--gpu) >>> print(f'self = {self.to(0)}') """ _dict = self.__dict__ for key, value in _dict.items(): if isinstance(value, torch.Tensor): _dict[key] = value.to(device) return self def __nice__(self): data = self.info.copy() data['pos_bboxes'] = data.pop('pos_bboxes').shape data['neg_bboxes'] = data.pop('neg_bboxes').shape parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())] body = ' ' + ',\n '.join(parts) return '{\n' + body + '\n}' @property def info(self): """Returns a dictionary of info about the object.""" return { 'pos_inds': self.pos_inds, 'neg_inds': self.neg_inds, 'pos_bboxes': self.pos_bboxes, 'neg_bboxes': self.neg_bboxes, 'pos_is_gt': self.pos_is_gt, 'num_gts': self.num_gts, 'pos_assigned_gt_inds': self.pos_assigned_gt_inds, } @classmethod def random(cls, rng=None, **kwargs): """ Args: rng (None | int | numpy.random.RandomState): seed or state. kwargs (keyword arguments): - num_preds: number of predicted boxes - num_gts: number of true boxes - p_ignore (float): probability of a predicted box assigned to \ an ignored truth. 
- p_assigned (float): probability of a predicted box not being \ assigned. - p_use_label (float | bool): with labels or not. Returns: :obj:`SamplingResult`: Randomly generated sampling result. Example: >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA >>> self = SamplingResult.random() >>> print(self.__dict__) """ from mmdet.core.bbox import demodata from mmdet.core.bbox.assigners.assign_result import AssignResult from mmdet.core.bbox.samplers.random_sampler import RandomSampler rng = demodata.ensure_rng(rng) # make probabilistic? num = 32 pos_fraction = 0.5 neg_pos_ub = -1 assign_result = AssignResult.random(rng=rng, **kwargs) # Note we could just compute an assignment bboxes = demodata.random_boxes(assign_result.num_preds, rng=rng) gt_bboxes = demodata.random_boxes(assign_result.num_gts, rng=rng) if rng.rand() > 0.2: # sometimes algorithms squeeze their data, be robust to that gt_bboxes = gt_bboxes.squeeze() bboxes = bboxes.squeeze() if assign_result.labels is None: gt_labels = None else: gt_labels = None # todo if gt_labels is None: add_gt_as_proposals = False else: add_gt_as_proposals = True # make probabilistic? sampler = RandomSampler( num, pos_fraction, neg_pos_ub=neg_pos_ub, add_gt_as_proposals=add_gt_as_proposals, rng=rng) self = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels) return self ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/score_hlr_sampler.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.ops import nms_match from ..builder import BBOX_SAMPLERS from ..transforms import bbox2roi from .base_sampler import BaseSampler from .sampling_result import SamplingResult @BBOX_SAMPLERS.register_module() class ScoreHLRSampler(BaseSampler): r"""Importance-based Sample Reweighting (ISR_N), described in `Prime Sample Attention in Object Detection <https://arxiv.org/abs/1904.04821>`_. Score hierarchical local rank (HLR) differs from RandomSampler in the negative sampling part. It first computes Score-HLR in a two-step way, then linearly maps the score HLR to loss weights. Args: num (int): Total number of sampled RoIs. pos_fraction (float): Fraction of positive samples. context (:class:`BaseRoIHead`): RoI head that the sampler belongs to. neg_pos_ub (int): Upper bound of the ratio of num negative to num positive, -1 means no upper bound. add_gt_as_proposals (bool): Whether to add ground truth as proposals. k (float): Power of the non-linear mapping. bias (float): Shift of the non-linear mapping. score_thr (float): Minimum score for a negative sample to be considered a valid bbox. """ def __init__(self, num, pos_fraction, context, neg_pos_ub=-1, add_gt_as_proposals=True, k=0.5, bias=0, score_thr=0.05, iou_thr=0.5, **kwargs): super().__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals) self.k = k self.bias = bias self.score_thr = score_thr self.iou_thr = iou_thr self.context = context # context of cascade detectors is a list, so distinguish them here. if not hasattr(context, 'num_stages'): self.bbox_roi_extractor = context.bbox_roi_extractor self.bbox_head = context.bbox_head self.with_shared_head = context.with_shared_head if self.with_shared_head: self.shared_head = context.shared_head else: self.bbox_roi_extractor = context.bbox_roi_extractor[ context.current_stage] self.bbox_head = context.bbox_head[context.current_stage] @staticmethod def random_choice(gallery, num): """Randomly select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor; If `gallery` is a ndarray or list, the returned indices will be a ndarray. Args: gallery (Tensor | ndarray | list): indices pool. num (int): expected sample num. Returns: Tensor or ndarray: sampled indices. """ assert len(gallery) >= num is_tensor = isinstance(gallery, torch.Tensor) if not is_tensor: if torch.cuda.is_available(): device = torch.cuda.current_device() else: device = 'cpu' gallery = torch.tensor(gallery, dtype=torch.long, device=device) perm = torch.randperm(gallery.numel(), device=gallery.device)[:num] rand_inds = gallery[perm] if not is_tensor: rand_inds = rand_inds.cpu().numpy() return rand_inds def _sample_pos(self, assign_result, num_expected, **kwargs): """Randomly sample some positive samples.""" pos_inds = torch.nonzero(assign_result.gt_inds > 0).flatten() if pos_inds.numel() <= num_expected: return pos_inds else: return self.random_choice(pos_inds, num_expected) def _sample_neg(self, assign_result, num_expected, bboxes, feats=None, img_meta=None, **kwargs): """Sample negative samples. Score-HLR sampler is done in the following steps: 1. Take the maximum positive score prediction of each negative samples as s_i. 2. Filter out negative samples whose s_i <= score_thr, the left samples are called valid samples. 3. Use NMS-Match to divide valid samples into different groups, samples in the same group will greatly overlap with each other 4. Rank the matched samples in two-steps to get Score-HLR. (1) In the same group, rank samples with their scores. (2) In the same score rank across different groups, rank samples with their scores again. 5. Linearly map Score-HLR to the final label weights. Args: assign_result (:obj:`AssignResult`): result of assigner. num_expected (int): Expected number of samples. bboxes (Tensor): bbox to be sampled. feats (Tensor): Features come from FPN. img_meta (dict): Meta information dictionary. 
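Returns: tuple[Tensor, Tensor]: Sampled negative indices and their Score-HLR label weights, or ``(neg_inds, None)`` when there are no negative candidates (editor's addition, summarizing the return paths below).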
""" neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten() num_neg = neg_inds.size(0) if num_neg == 0: return neg_inds, None with torch.no_grad(): neg_bboxes = bboxes[neg_inds] neg_rois = bbox2roi([neg_bboxes]) bbox_result = self.context._bbox_forward(feats, neg_rois) cls_score, bbox_pred = bbox_result['cls_score'], bbox_result[ 'bbox_pred'] ori_loss = self.bbox_head.loss( cls_score=cls_score, bbox_pred=None, rois=None, labels=neg_inds.new_full((num_neg, ), self.bbox_head.num_classes), label_weights=cls_score.new_ones(num_neg), bbox_targets=None, bbox_weights=None, reduction_override='none')['loss_cls'] # filter out samples with the max score lower than score_thr max_score, argmax_score = cls_score.softmax(-1)[:, :-1].max(-1) valid_inds = (max_score > self.score_thr).nonzero().view(-1) invalid_inds = (max_score <= self.score_thr).nonzero().view(-1) num_valid = valid_inds.size(0) num_invalid = invalid_inds.size(0) num_expected = min(num_neg, num_expected) num_hlr = min(num_valid, num_expected) num_rand = num_expected - num_hlr if num_valid > 0: valid_rois = neg_rois[valid_inds] valid_max_score = max_score[valid_inds] valid_argmax_score = argmax_score[valid_inds] valid_bbox_pred = bbox_pred[valid_inds] # valid_bbox_pred shape: [num_valid, #num_classes, 4] valid_bbox_pred = valid_bbox_pred.view( valid_bbox_pred.size(0), -1, 4) selected_bbox_pred = valid_bbox_pred[range(num_valid), valid_argmax_score] pred_bboxes = self.bbox_head.bbox_coder.decode( valid_rois[:, 1:], selected_bbox_pred) pred_bboxes_with_score = torch.cat( [pred_bboxes, valid_max_score[:, None]], -1) group = nms_match(pred_bboxes_with_score, self.iou_thr) # imp: importance imp = cls_score.new_zeros(num_valid) for g in group: g_score = valid_max_score[g] # g_score has already sorted rank = g_score.new_tensor(range(g_score.size(0))) imp[g] = num_valid - rank + g_score _, imp_rank_inds = imp.sort(descending=True) _, imp_rank = imp_rank_inds.sort() hlr_inds = imp_rank_inds[:num_expected] if num_rand > 0: rand_inds = torch.randperm(num_invalid)[:num_rand] select_inds = torch.cat( [valid_inds[hlr_inds], invalid_inds[rand_inds]]) else: select_inds = valid_inds[hlr_inds] neg_label_weights = cls_score.new_ones(num_expected) up_bound = max(num_expected, num_valid) imp_weights = (up_bound - imp_rank[hlr_inds].float()) / up_bound neg_label_weights[:num_hlr] = imp_weights neg_label_weights[num_hlr:] = imp_weights.min() neg_label_weights = (self.bias + (1 - self.bias) * neg_label_weights).pow( self.k) ori_selected_loss = ori_loss[select_inds] new_loss = ori_selected_loss * neg_label_weights norm_ratio = ori_selected_loss.sum() / new_loss.sum() neg_label_weights *= norm_ratio else: neg_label_weights = cls_score.new_ones(num_expected) select_inds = torch.randperm(num_neg)[:num_expected] return neg_inds[select_inds], neg_label_weights def sample(self, assign_result, bboxes, gt_bboxes, gt_labels=None, img_meta=None, **kwargs): """Sample positive and negative bboxes. This is a simple implementation of bbox sampling given candidates, assigning results and ground truth bboxes. Args: assign_result (:obj:`AssignResult`): Bbox assigning results. bboxes (Tensor): Boxes to be sampled from. gt_bboxes (Tensor): Ground truth bboxes. gt_labels (Tensor, optional): Class labels of ground truth bboxes. Returns: tuple[:obj:`SamplingResult`, Tensor]: Sampling result and negative label weights. 
""" bboxes = bboxes[:, :4] gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8) if self.add_gt_as_proposals: bboxes = torch.cat([gt_bboxes, bboxes], dim=0) assign_result.add_gt_(gt_labels) gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) gt_flags = torch.cat([gt_ones, gt_flags]) num_expected_pos = int(self.num * self.pos_fraction) pos_inds = self.pos_sampler._sample_pos( assign_result, num_expected_pos, bboxes=bboxes, **kwargs) num_sampled_pos = pos_inds.numel() num_expected_neg = self.num - num_sampled_pos if self.neg_pos_ub >= 0: _pos = max(1, num_sampled_pos) neg_upper_bound = int(self.neg_pos_ub * _pos) if num_expected_neg > neg_upper_bound: num_expected_neg = neg_upper_bound neg_inds, neg_label_weights = self.neg_sampler._sample_neg( assign_result, num_expected_neg, bboxes, img_meta=img_meta, **kwargs) return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags), neg_label_weights ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/transforms.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch def find_inside_bboxes(bboxes, img_h, img_w): """Find bboxes as long as a part of bboxes is inside the image. Args: bboxes (Tensor): Shape (N, 4). img_h (int): Image height. img_w (int): Image width. Returns: Tensor: Index of the remaining bboxes. """ inside_inds = (bboxes[:, 0] < img_w) & (bboxes[:, 2] > 0) \ & (bboxes[:, 1] < img_h) & (bboxes[:, 3] > 0) return inside_inds def bbox_flip(bboxes, img_shape, direction='horizontal'): """Flip bboxes horizontally or vertically. Args: bboxes (Tensor): Shape (..., 4*k) img_shape (tuple): Image shape. direction (str): Flip direction, options are "horizontal", "vertical", "diagonal". Default: "horizontal" Returns: Tensor: Flipped bboxes. """ assert bboxes.shape[-1] % 4 == 0 assert direction in ['horizontal', 'vertical', 'diagonal'] flipped = bboxes.clone() if direction == 'horizontal': flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] elif direction == 'vertical': flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] else: flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] return flipped def bbox_mapping(bboxes, img_shape, scale_factor, flip, flip_direction='horizontal'): """Map bboxes from the original image scale to testing scale.""" new_bboxes = bboxes * bboxes.new_tensor(scale_factor) if flip: new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction) return new_bboxes def bbox_mapping_back(bboxes, img_shape, scale_factor, flip, flip_direction='horizontal'): """Map bboxes from testing scale to original image scale.""" new_bboxes = bbox_flip(bboxes, img_shape, flip_direction) if flip else bboxes new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor) return new_bboxes.view(bboxes.shape) def bbox2roi(bbox_list): """Convert a list of bboxes to roi format. Args: bbox_list (list[Tensor]): a list of bboxes corresponding to a batch of images. 
Returns: Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2] """ rois_list = [] for img_id, bboxes in enumerate(bbox_list): if bboxes.size(0) > 0: img_inds = bboxes.new_full((bboxes.size(0), 1), img_id) rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1) else: rois = bboxes.new_zeros((0, 5)) rois_list.append(rois) rois = torch.cat(rois_list, 0) return rois def roi2bbox(rois): """Convert rois to bounding box format. Args: rois (torch.Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. Returns: list[torch.Tensor]: Converted boxes of corresponding rois. """ bbox_list = [] img_ids = torch.unique(rois[:, 0].cpu(), sorted=True) for img_id in img_ids: inds = (rois[:, 0] == img_id.item()) bbox = rois[inds, 1:] bbox_list.append(bbox) return bbox_list def bbox2result(bboxes, labels, num_classes): """Convert detection results to a list of numpy arrays. Args: bboxes (torch.Tensor | np.ndarray): shape (n, 5) labels (torch.Tensor | np.ndarray): shape (n, ) num_classes (int): class number, including background class Returns: list(ndarray): bbox results of each class """ if bboxes.shape[0] == 0: return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)] else: if isinstance(bboxes, torch.Tensor): bboxes = bboxes.detach().cpu().numpy() labels = labels.detach().cpu().numpy() return [bboxes[labels == i, :] for i in range(num_classes)] def distance2bbox(points, distance, max_shape=None): """Decode distance prediction to bounding box. Args: points (Tensor): Shape (B, N, 2) or (N, 2). distance (Tensor): Distance from the given point to 4 boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. Returns: Tensor: Boxes with shape (N, 4) or (B, N, 4) """ x1 = points[..., 0] - distance[..., 0] y1 = points[..., 1] - distance[..., 1] x2 = points[..., 0] + distance[..., 2] y2 = points[..., 1] + distance[..., 3] bboxes = torch.stack([x1, y1, x2, y2], -1) if max_shape is not None: if bboxes.dim() == 2 and not torch.onnx.is_in_onnx_export(): # speed up bboxes[:, 0::2].clamp_(min=0, max=max_shape[1]) bboxes[:, 1::2].clamp_(min=0, max=max_shape[0]) return bboxes # clip bboxes with dynamic `min` and `max` for onnx if torch.onnx.is_in_onnx_export(): from mmdet.core.export import dynamic_clip_for_onnx x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape) bboxes = torch.stack([x1, y1, x2, y2], dim=-1) return bboxes if not isinstance(max_shape, torch.Tensor): max_shape = x1.new_tensor(max_shape) max_shape = max_shape[..., :2].type_as(x1) if max_shape.ndim == 2: assert bboxes.ndim == 3 assert max_shape.size(0) == bboxes.size(0) min_xy = x1.new_tensor(0) max_xy = torch.cat([max_shape, max_shape], dim=-1).flip(-1).unsqueeze(-2) bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) return bboxes def bbox2distance(points, bbox, max_dis=None, eps=0.1): """Decode bounding box based on distances. Args: points (Tensor): Shape (n, 2), [x, y]. bbox (Tensor): Shape (n, 4), "xyxy" format max_dis (float): Upper bound of the distance. eps (float): a small value to ensure target < max_dis, instead <= Returns: Tensor: Decoded distances. 
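Example (editor's illustration; the values are made up for clarity): >>> points = torch.Tensor([[10., 10.]]) >>> bbox = torch.Tensor([[4., 6., 16., 18.]]) >>> bbox2distance(points, bbox) tensor([[6., 4., 6., 8.]])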
""" left = points[:, 0] - bbox[:, 0] top = points[:, 1] - bbox[:, 1] right = bbox[:, 2] - points[:, 0] bottom = bbox[:, 3] - points[:, 1] if max_dis is not None: left = left.clamp(min=0, max=max_dis - eps) top = top.clamp(min=0, max=max_dis - eps) right = right.clamp(min=0, max=max_dis - eps) bottom = bottom.clamp(min=0, max=max_dis - eps) return torch.stack([left, top, right, bottom], -1) def bbox_rescale(bboxes, scale_factor=1.0): """Rescale bounding box w.r.t. scale_factor. Args: bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois scale_factor (float): rescale factor Returns: Tensor: Rescaled bboxes. """ if bboxes.size(1) == 5: bboxes_ = bboxes[:, 1:] inds_ = bboxes[:, 0] else: bboxes_ = bboxes cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5 cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5 w = bboxes_[:, 2] - bboxes_[:, 0] h = bboxes_[:, 3] - bboxes_[:, 1] w = w * scale_factor h = h * scale_factor x1 = cx - 0.5 * w x2 = cx + 0.5 * w y1 = cy - 0.5 * h y2 = cy + 0.5 * h if bboxes.size(1) == 5: rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1) else: rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) return rescaled_bboxes def bbox_cxcywh_to_xyxy(bbox): """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2). Args: bbox (Tensor): Shape (n, 4) for bboxes. Returns: Tensor: Converted bboxes. """ cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1) bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)] return torch.cat(bbox_new, dim=-1) def bbox_xyxy_to_cxcywh(bbox): """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h). Args: bbox (Tensor): Shape (n, 4) for bboxes. Returns: Tensor: Converted bboxes. """ x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1) bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)] return torch.cat(bbox_new, dim=-1) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/data_structures/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .general_data import GeneralData from .instance_data import InstanceData __all__ = ['GeneralData', 'InstanceData'] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/data_structures/general_data.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import copy import numpy as np import torch from mmdet.utils.util_mixins import NiceRepr class GeneralData(NiceRepr): """A general data structure of OpenMMlab. A data structure that stores the meta information, the annotations of the images or the model predictions, which can be used in communication between components. The attributes in `GeneralData` are divided into two parts, the `meta_info_fields` and the `data_fields` respectively. - `meta_info_fields`: Usually contains the information about the image such as filename, image_shape, pad_shape, etc. All attributes in it are immutable once set, but the user can add new meta information with `set_meta_info` function, all information can be accessed with methods `meta_info_keys`, `meta_info_values`, `meta_info_items`. - `data_fields`: Annotations or model predictions are stored. The attributes can be accessed or modified by dict-like or object-like operations, such as `.` , `[]`, `in`, `del`, `pop(str)` `get(str)`, `keys()`, `values()`, `items()`. 
Users can also apply tensor-like methods to all :obj:`torch.Tensor` in the `data_fields`, such as `.cuda()`, `.cpu()`, `.numpy()`, `device`, `.to()`, `.detach()`. Args: meta_info (dict, optional): A dict that contains the meta information of a single image, such as `img_shape`, `scale_factor`, etc. Default: None. data (dict, optional): A dict that contains annotations of a single image or model predictions. Default: None. Examples: >>> from mmdet.core import GeneralData >>> img_meta = dict(img_shape=(800, 1196, 3), pad_shape=(800, 1216, 3)) >>> instance_data = GeneralData(meta_info=img_meta) >>> 'img_shape' in instance_data True >>> instance_data.det_labels = torch.LongTensor([0, 1, 2, 3]) >>> instance_data["det_scores"] = torch.Tensor([0.01, 0.1, 0.2, 0.3]) >>> print(instance_data) >>> instance_data.det_scores tensor([0.0100, 0.1000, 0.2000, 0.3000]) >>> instance_data.det_labels tensor([0, 1, 2, 3]) >>> instance_data['det_labels'] tensor([0, 1, 2, 3]) >>> 'det_labels' in instance_data True >>> instance_data.img_shape (800, 1196, 3) >>> 'det_scores' in instance_data True >>> del instance_data.det_scores >>> 'det_scores' in instance_data False >>> det_labels = instance_data.pop('det_labels', None) >>> det_labels tensor([0, 1, 2, 3]) >>> 'det_labels' in instance_data False """ def __init__(self, meta_info=None, data=None): self._meta_info_fields = set() self._data_fields = set() if meta_info is not None: self.set_meta_info(meta_info=meta_info) if data is not None: self.set_data(data) def set_meta_info(self, meta_info): """Add meta information. Args: meta_info (dict): A dict that contains the meta information of an image, such as `img_shape`, `scale_factor`, etc. Default: None. """ assert isinstance(meta_info, dict), f'meta_info should be a `dict` but got {meta_info}' meta = copy.deepcopy(meta_info) for k, v in meta.items(): # should be consistent with original meta_info if k in self._meta_info_fields: ori_value = getattr(self, k) if isinstance(ori_value, (torch.Tensor, np.ndarray)): if (ori_value == v).all(): continue else: raise KeyError( f'img_meta_info {k} has been set as ' f'{getattr(self, k)} before, which is immutable ') elif ori_value == v: continue else: raise KeyError( f'img_meta_info {k} has been set as ' f'{getattr(self, k)} before, which is immutable ') else: self._meta_info_fields.add(k) self.__dict__[k] = v def set_data(self, data): """Update a dict to `data_fields`. Args: data (dict): A dict that contains annotations of an image or model predictions. Default: None. """ assert isinstance(data, dict), f'data should be a `dict` but got {data}' for k, v in data.items(): self.__setattr__(k, v) def new(self, meta_info=None, data=None): """Return a new data object with the same image meta information. Args: meta_info (dict, optional): A dict that contains the meta information of an image, such as `img_shape`, `scale_factor`, etc. Default: None. data (dict, optional): A dict that contains annotations of an image or model predictions. Default: None. """ new_data = self.__class__() new_data.set_meta_info(dict(self.meta_info_items())) if meta_info is not None: new_data.set_meta_info(meta_info) if data is not None: new_data.set_data(data) return new_data def keys(self): """ Returns: list: Contains all keys in data_fields. """ return [key for key in self._data_fields] def meta_info_keys(self): """ Returns: list: Contains all keys in meta_info_fields. """ return [key for key in self._meta_info_fields] def values(self): """ Returns: list: Contains all values in data_fields.
""" return [getattr(self, k) for k in self.keys()] def meta_info_values(self): """ Returns: list: Contains all values in meta_info_fields. """ return [getattr(self, k) for k in self.meta_info_keys()] def items(self): for k in self.keys(): yield (k, getattr(self, k)) def meta_info_items(self): for k in self.meta_info_keys(): yield (k, getattr(self, k)) def __setattr__(self, name, val): if name in ('_meta_info_fields', '_data_fields'): if not hasattr(self, name): super().__setattr__(name, val) else: raise AttributeError( f'{name} has been used as a ' f'private attribute, which is immutable. ') else: if name in self._meta_info_fields: raise AttributeError(f'`{name}` is used in meta information,' f'which is immutable') self._data_fields.add(name) super().__setattr__(name, val) def __delattr__(self, item): if item in ('_meta_info_fields', '_data_fields'): raise AttributeError(f'{item} has been used as a ' f'private attribute, which is immutable. ') if item in self._meta_info_fields: raise KeyError(f'{item} is used in meta information, ' f'which is immutable.') super().__delattr__(item) if item in self._data_fields: self._data_fields.remove(item) # dict-like methods __setitem__ = __setattr__ __delitem__ = __delattr__ def __getitem__(self, name): return getattr(self, name) def get(self, *args): assert len(args) < 3, '`get` get more than 2 arguments' return self.__dict__.get(*args) def pop(self, *args): assert len(args) < 3, '`pop` get more than 2 arguments' name = args[0] if name in self._meta_info_fields: raise KeyError(f'{name} is a key in meta information, ' f'which is immutable') if args[0] in self._data_fields: self._data_fields.remove(args[0]) return self.__dict__.pop(*args) # with default value elif len(args) == 2: return args[1] else: raise KeyError(f'{args[0]}') def __contains__(self, item): return item in self._data_fields or \ item in self._meta_info_fields # Tensor-like methods def to(self, *args, **kwargs): """Apply same name function to all tensors in data_fields.""" new_data = self.new() for k, v in self.items(): if hasattr(v, 'to'): v = v.to(*args, **kwargs) new_data[k] = v return new_data # Tensor-like methods def cpu(self): """Apply same name function to all tensors in data_fields.""" new_data = self.new() for k, v in self.items(): if isinstance(v, torch.Tensor): v = v.cpu() new_data[k] = v return new_data # Tensor-like methods def npu(self): """Apply same name function to all tensors in data_fields.""" new_data = self.new() for k, v in self.items(): if isinstance(v, torch.Tensor): v = v.npu() new_data[k] = v return new_data # Tensor-like methods def mlu(self): """Apply same name function to all tensors in data_fields.""" new_data = self.new() for k, v in self.items(): if isinstance(v, torch.Tensor): v = v.mlu() new_data[k] = v return new_data # Tensor-like methods def cuda(self): """Apply same name function to all tensors in data_fields.""" new_data = self.new() for k, v in self.items(): if isinstance(v, torch.Tensor): v = v.cuda() new_data[k] = v return new_data # Tensor-like methods def detach(self): """Apply same name function to all tensors in data_fields.""" new_data = self.new() for k, v in self.items(): if isinstance(v, torch.Tensor): v = v.detach() new_data[k] = v return new_data # Tensor-like methods def numpy(self): """Apply same name function to all tensors in data_fields.""" new_data = self.new() for k, v in self.items(): if isinstance(v, torch.Tensor): v = v.detach().cpu().numpy() new_data[k] = v return new_data def __nice__(self): repr = '\n \n META INFORMATION 
\n' for k, v in self.meta_info_items(): repr += f'{k}: {v} \n' repr += '\n DATA FIELDS \n' for k, v in self.items(): if isinstance(v, (torch.Tensor, np.ndarray)): repr += f'shape of {k}: {v.shape} \n' else: repr += f'{k}: {v} \n' return repr + '\n' ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/data_structures/instance_data.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import itertools import numpy as np import torch from .general_data import GeneralData class InstanceData(GeneralData): """Data structure for instance-level annotations or predictions. Subclass of :class:`GeneralData`. All values in `data_fields` should have the same length. This design refers to https://github.com/facebookresearch/detectron2/blob/master/detectron2/structures/instances.py # noqa E501 Examples: >>> from mmdet.core import InstanceData >>> import numpy as np >>> img_meta = dict(img_shape=(800, 1196, 3), pad_shape=(800, 1216, 3)) >>> results = InstanceData(img_meta) >>> 'img_shape' in results True >>> results.det_labels = torch.LongTensor([0, 1, 2, 3]) >>> results["det_scores"] = torch.Tensor([0.01, 0.7, 0.6, 0.3]) >>> results["det_masks"] = np.random.rand(4, 2, 2) >>> len(results) 4 >>> print(results) >>> sorted_results = results[results.det_scores.sort().indices] >>> sorted_results.det_scores tensor([0.0100, 0.3000, 0.6000, 0.7000]) >>> sorted_results.det_labels tensor([0, 3, 2, 1]) >>> print(results[results.det_scores > 0.5]) >>> results[results.det_scores > 0.5].det_labels tensor([1, 2]) >>> results[results.det_scores > 0.5].det_scores tensor([0.7000, 0.6000]) """ def __setattr__(self, name, value): if name in ('_meta_info_fields', '_data_fields'): if not hasattr(self, name): super().__setattr__(name, value) else: raise AttributeError( f'{name} has been used as a ' f'private attribute, which is immutable. ') else: assert isinstance(value, (torch.Tensor, np.ndarray, list)), \ f'Can set {type(value)}, only support' \ f' {(torch.Tensor, np.ndarray, list)}' if self._data_fields: assert len(value) == len(self), f'the length of ' \ f'values {len(value)} is ' \ f'not consistent with' \ f' the length ' \ f'of this :obj:`InstanceData` ' \ f'{len(self)} ' super().__setattr__(name, value) def __getitem__(self, item): """ Args: item (str, :obj:`slice`, :obj:`torch.LongTensor`, :obj:`torch.BoolTensor`): get the corresponding values according to item. Returns: :obj:`InstanceData`: Corresponding values. """ assert len(self), 'This is an empty instance' assert isinstance( item, (str, slice, int, torch.LongTensor, torch.BoolTensor)) if isinstance(item, str): return getattr(self, item) if type(item) == int: if item >= len(self) or item < -len(self): raise IndexError(f'Index {item} out of range!') else: # keep the dimension item = slice(item, None, len(self)) new_data = self.new() if isinstance(item, (torch.Tensor)): assert item.dim() == 1, 'Only support getting the' \ ' values along the first dimension.' if isinstance(item, torch.BoolTensor): assert len(item) == len(self), f'The shape of the' \ f' input (BoolTensor) ' \ f'{len(item)} ' \ f' does not match the shape ' \ f'of the indexed tensor ' \ f'in results_field ' \ f'{len(self)} at ' \ f'first dimension. 
' for k, v in self.items(): if isinstance(v, torch.Tensor): new_data[k] = v[item] elif isinstance(v, np.ndarray): new_data[k] = v[item.cpu().numpy()] elif isinstance(v, list): r_list = [] # convert to indexes from boolTensor if isinstance(item, torch.BoolTensor): indexes = torch.nonzero(item).view(-1) else: indexes = item for index in indexes: r_list.append(v[index]) new_data[k] = r_list else: # item is a slice for k, v in self.items(): new_data[k] = v[item] return new_data @staticmethod def cat(instances_list): """Concat the predictions of all :obj:`InstanceData` in the list. Args: instances_list (list[:obj:`InstanceData`]): A list of :obj:`InstanceData`. Returns: obj:`InstanceData` """ assert all( isinstance(results, InstanceData) for results in instances_list) assert len(instances_list) > 0 if len(instances_list) == 1: return instances_list[0] new_data = instances_list[0].new() for k in instances_list[0]._data_fields: values = [results[k] for results in instances_list] v0 = values[0] if isinstance(v0, torch.Tensor): values = torch.cat(values, dim=0) elif isinstance(v0, np.ndarray): values = np.concatenate(values, axis=0) elif isinstance(v0, list): values = list(itertools.chain(*values)) else: raise ValueError( f'Can not concat the {k} which is a {type(v0)}') new_data[k] = values return new_data def __len__(self): if len(self._data_fields): for v in self.values(): return len(v) else: raise AssertionError('This is an empty `InstanceData`.') ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/evaluation/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .class_names import (cityscapes_classes, coco_classes, dataset_aliases, get_classes, imagenet_det_classes, imagenet_vid_classes, oid_challenge_classes, oid_v6_classes, voc_classes) from .eval_hooks import DistEvalHook, EvalHook from .mean_ap import average_precision, eval_map, print_map_summary from .panoptic_utils import INSTANCE_OFFSET from .recall import (eval_recalls, plot_iou_recall, plot_num_recall, print_recall_summary) __all__ = [ 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes', 'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes', 'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls', 'print_recall_summary', 'plot_num_recall', 'plot_iou_recall', 'oid_v6_classes', 'oid_challenge_classes', 'INSTANCE_OFFSET' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/evaluation/bbox_overlaps.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import numpy as np def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6, use_legacy_coordinate=False): """Calculate the ious between each bbox of bboxes1 and bboxes2. Args: bboxes1 (ndarray): Shape (n, 4) bboxes2 (ndarray): Shape (k, 4) mode (str): IOU (intersection over union) or IOF (intersection over foreground) use_legacy_coordinate (bool): Whether to use coordinate system in mmdet v1.x. which means width, height should be calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. Note when function is used in `VOCDataset`, it should be True to align with the official implementation `http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar` Default: False. Returns: ious (ndarray): Shape (n, k) """ assert mode in ['iou', 'iof'] if not use_legacy_coordinate: extra_length = 0. else: extra_length = 1. 
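# Editor's note: the legacy (mmdet v1.x / VOC-style) convention treats box
# coordinates as inclusive pixel indices, which is why the widths and
# heights below are computed as `x2 - x1 + extra_length` and
# `y2 - y1 + extra_length` with `extra_length = 1`.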
bboxes1 = bboxes1.astype(np.float32) bboxes2 = bboxes2.astype(np.float32) rows = bboxes1.shape[0] cols = bboxes2.shape[0] ious = np.zeros((rows, cols), dtype=np.float32) if rows * cols == 0: return ious exchange = False if bboxes1.shape[0] > bboxes2.shape[0]: bboxes1, bboxes2 = bboxes2, bboxes1 ious = np.zeros((cols, rows), dtype=np.float32) exchange = True area1 = (bboxes1[:, 2] - bboxes1[:, 0] + extra_length) * ( bboxes1[:, 3] - bboxes1[:, 1] + extra_length) area2 = (bboxes2[:, 2] - bboxes2[:, 0] + extra_length) * ( bboxes2[:, 3] - bboxes2[:, 1] + extra_length) for i in range(bboxes1.shape[0]): x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0]) y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1]) x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2]) y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3]) overlap = np.maximum(x_end - x_start + extra_length, 0) * np.maximum( y_end - y_start + extra_length, 0) if mode == 'iou': union = area1[i] + area2 - overlap else: union = area1[i] if not exchange else area2 union = np.maximum(union, eps) ious[i, :] = overlap / union if exchange: ious = ious.T return ious ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/evaluation/class_names.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import mmcv def wider_face_classes(): return ['face'] def voc_classes(): return [ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'potted plant', 'sheep', 'sofa', 'train', 'tvmonitor' ] def imagenet_det_classes(): return [ 'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo', 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam', 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap', 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder', 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito', 'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle', 'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker', 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew', 'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper', 'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly', 'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig', 'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog', 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart', 'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger', 'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim', 'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse', 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle', 'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard', 'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can', 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace', 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume', 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza', 'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine', 'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse', 'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator', 'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler', 'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver', 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile', 'snowplow', 
'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', 'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer', 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine', 'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie', 'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet', 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin', 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft', 'whale', 'wine_bottle', 'zebra' ] def imagenet_vid_classes(): return [ 'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car', 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda', 'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit', 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle', 'watercraft', 'whale', 'zebra' ] def coco_classes(): return [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush' ] def cityscapes_classes(): return [ 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle' ] def oid_challenge_classes(): return [ 'Footwear', 'Jeans', 'House', 'Tree', 'Woman', 'Man', 'Land vehicle', 'Person', 'Wheel', 'Bus', 'Human face', 'Bird', 'Dress', 'Girl', 'Vehicle', 'Building', 'Cat', 'Car', 'Belt', 'Elephant', 'Dessert', 'Butterfly', 'Train', 'Guitar', 'Poster', 'Book', 'Boy', 'Bee', 'Flower', 'Window', 'Hat', 'Human head', 'Dog', 'Human arm', 'Drink', 'Human mouth', 'Human hair', 'Human nose', 'Human hand', 'Table', 'Marine invertebrates', 'Fish', 'Sculpture', 'Rose', 'Street light', 'Glasses', 'Fountain', 'Skyscraper', 'Swimwear', 'Brassiere', 'Drum', 'Duck', 'Countertop', 'Furniture', 'Ball', 'Human leg', 'Boat', 'Balloon', 'Bicycle helmet', 'Goggles', 'Door', 'Human eye', 'Shirt', 'Toy', 'Teddy bear', 'Pasta', 'Tomato', 'Human ear', 'Vehicle registration plate', 'Microphone', 'Musical keyboard', 'Tower', 'Houseplant', 'Flowerpot', 'Fruit', 'Vegetable', 'Musical instrument', 'Suit', 'Motorcycle', 'Bagel', 'French fries', 'Hamburger', 'Chair', 'Salt and pepper shakers', 'Snail', 'Airplane', 'Horse', 'Laptop', 'Computer keyboard', 'Football helmet', 'Cocktail', 'Juice', 'Tie', 'Computer monitor', 'Human beard', 'Bottle', 'Saxophone', 'Lemon', 'Mouse', 'Sock', 'Cowboy hat', 'Sun hat', 'Football', 'Porch', 'Sunglasses', 'Lobster', 'Crab', 'Picture frame', 'Van', 'Crocodile', 'Surfboard', 'Shorts', 'Helicopter', 'Helmet', 'Sports uniform', 'Taxi', 'Swan', 'Goose', 'Coat', 'Jacket', 'Handbag', 'Flag', 'Skateboard', 'Television', 'Tire', 'Spoon', 'Palm tree', 'Stairs', 'Salad', 'Castle', 'Oven', 'Microwave oven', 'Wine', 'Ceiling fan', 'Mechanical fan', 'Cattle', 'Truck', 'Box', 'Ambulance', 'Desk', 'Wine glass', 'Reptile', 'Tank', 'Traffic light', 'Billboard', 'Tent', 'Insect', 
'Spider', 'Treadmill', 'Cupboard', 'Shelf', 'Seat belt', 'Human foot', 'Bicycle', 'Bicycle wheel', 'Couch', 'Bookcase', 'Fedora', 'Backpack', 'Bench', 'Oyster', 'Moths and butterflies', 'Lavender', 'Waffle', 'Fork', 'Animal', 'Accordion', 'Mobile phone', 'Plate', 'Coffee cup', 'Saucer', 'Platter', 'Dagger', 'Knife', 'Bull', 'Tortoise', 'Sea turtle', 'Deer', 'Weapon', 'Apple', 'Ski', 'Taco', 'Traffic sign', 'Beer', 'Necklace', 'Sunflower', 'Piano', 'Organ', 'Harpsichord', 'Bed', 'Cabinetry', 'Nightstand', 'Curtain', 'Chest of drawers', 'Drawer', 'Parrot', 'Sandal', 'High heels', 'Tableware', 'Cart', 'Mushroom', 'Kite', 'Missile', 'Seafood', 'Camera', 'Paper towel', 'Toilet paper', 'Sombrero', 'Radish', 'Lighthouse', 'Segway', 'Pig', 'Watercraft', 'Golf cart', 'studio couch', 'Dolphin', 'Whale', 'Earrings', 'Otter', 'Sea lion', 'Whiteboard', 'Monkey', 'Gondola', 'Zebra', 'Baseball glove', 'Scarf', 'Adhesive tape', 'Trousers', 'Scoreboard', 'Lily', 'Carnivore', 'Power plugs and sockets', 'Office building', 'Sandwich', 'Swimming pool', 'Headphones', 'Tin can', 'Crown', 'Doll', 'Cake', 'Frog', 'Beetle', 'Ant', 'Gas stove', 'Canoe', 'Falcon', 'Blue jay', 'Egg', 'Fire hydrant', 'Raccoon', 'Muffin', 'Wall clock', 'Coffee', 'Mug', 'Tea', 'Bear', 'Waste container', 'Home appliance', 'Candle', 'Lion', 'Mirror', 'Starfish', 'Marine mammal', 'Wheelchair', 'Umbrella', 'Alpaca', 'Violin', 'Cello', 'Brown bear', 'Canary', 'Bat', 'Ruler', 'Plastic bag', 'Penguin', 'Watermelon', 'Harbor seal', 'Pen', 'Pumpkin', 'Harp', 'Kitchen appliance', 'Roller skates', 'Bust', 'Coffee table', 'Tennis ball', 'Tennis racket', 'Ladder', 'Boot', 'Bowl', 'Stop sign', 'Volleyball', 'Eagle', 'Paddle', 'Chicken', 'Skull', 'Lamp', 'Beehive', 'Maple', 'Sink', 'Goldfish', 'Tripod', 'Coconut', 'Bidet', 'Tap', 'Bathroom cabinet', 'Toilet', 'Filing cabinet', 'Pretzel', 'Table tennis racket', 'Bronze sculpture', 'Rocket', 'Mouse', 'Hamster', 'Lizard', 'Lifejacket', 'Goat', 'Washing machine', 'Trumpet', 'Horn', 'Trombone', 'Sheep', 'Tablet computer', 'Pillow', 'Kitchen & dining room table', 'Parachute', 'Raven', 'Glove', 'Loveseat', 'Christmas tree', 'Shellfish', 'Rifle', 'Shotgun', 'Sushi', 'Sparrow', 'Bread', 'Toaster', 'Watch', 'Asparagus', 'Artichoke', 'Suitcase', 'Antelope', 'Broccoli', 'Ice cream', 'Racket', 'Banana', 'Cookie', 'Cucumber', 'Dragonfly', 'Lynx', 'Caterpillar', 'Light bulb', 'Office supplies', 'Miniskirt', 'Skirt', 'Fireplace', 'Potato', 'Light switch', 'Croissant', 'Cabbage', 'Ladybug', 'Handgun', 'Luggage and bags', 'Window blind', 'Snowboard', 'Baseball bat', 'Digital clock', 'Serving tray', 'Infant bed', 'Sofa bed', 'Guacamole', 'Fox', 'Pizza', 'Snowplow', 'Jet ski', 'Refrigerator', 'Lantern', 'Convenience store', 'Sword', 'Rugby ball', 'Owl', 'Ostrich', 'Pancake', 'Strawberry', 'Carrot', 'Tart', 'Dice', 'Turkey', 'Rabbit', 'Invertebrate', 'Vase', 'Stool', 'Swim cap', 'Shower', 'Clock', 'Jellyfish', 'Aircraft', 'Chopsticks', 'Orange', 'Snake', 'Sewing machine', 'Kangaroo', 'Mixer', 'Food processor', 'Shrimp', 'Towel', 'Porcupine', 'Jaguar', 'Cannon', 'Limousine', 'Mule', 'Squirrel', 'Kitchen knife', 'Tiara', 'Tiger', 'Bow and arrow', 'Candy', 'Rhinoceros', 'Shark', 'Cricket ball', 'Doughnut', 'Plumbing fixture', 'Camel', 'Polar bear', 'Coin', 'Printer', 'Blender', 'Giraffe', 'Billiard table', 'Kettle', 'Dinosaur', 'Pineapple', 'Zucchini', 'Jug', 'Barge', 'Teapot', 'Golf ball', 'Binoculars', 'Scissors', 'Hot dog', 'Door handle', 'Seahorse', 'Bathtub', 'Leopard', 'Centipede', 'Grapefruit', 'Snowman', 'Cheetah', 
'Alarm clock', 'Grape', 'Wrench', 'Wok', 'Bell pepper', 'Cake stand', 'Barrel', 'Woodpecker', 'Flute', 'Corded phone', 'Willow', 'Punching bag', 'Pomegranate', 'Telephone', 'Pear', 'Common fig', 'Bench', 'Wood-burning stove', 'Burrito', 'Nail', 'Turtle', 'Submarine sandwich', 'Drinking straw', 'Peach', 'Popcorn', 'Frying pan', 'Picnic basket', 'Honeycomb', 'Envelope', 'Mango', 'Cutting board', 'Pitcher', 'Stationary bicycle', 'Dumbbell', 'Personal care', 'Dog bed', 'Snowmobile', 'Oboe', 'Briefcase', 'Squash', 'Tick', 'Slow cooker', 'Coffeemaker', 'Measuring cup', 'Crutch', 'Stretcher', 'Screwdriver', 'Flashlight', 'Spatula', 'Pressure cooker', 'Ring binder', 'Beaker', 'Torch', 'Winter melon' ] def oid_v6_classes(): return [ 'Tortoise', 'Container', 'Magpie', 'Sea turtle', 'Football', 'Ambulance', 'Ladder', 'Toothbrush', 'Syringe', 'Sink', 'Toy', 'Organ (Musical Instrument)', 'Cassette deck', 'Apple', 'Human eye', 'Cosmetics', 'Paddle', 'Snowman', 'Beer', 'Chopsticks', 'Human beard', 'Bird', 'Parking meter', 'Traffic light', 'Croissant', 'Cucumber', 'Radish', 'Towel', 'Doll', 'Skull', 'Washing machine', 'Glove', 'Tick', 'Belt', 'Sunglasses', 'Banjo', 'Cart', 'Ball', 'Backpack', 'Bicycle', 'Home appliance', 'Centipede', 'Boat', 'Surfboard', 'Boot', 'Headphones', 'Hot dog', 'Shorts', 'Fast food', 'Bus', 'Boy', 'Screwdriver', 'Bicycle wheel', 'Barge', 'Laptop', 'Miniskirt', 'Drill (Tool)', 'Dress', 'Bear', 'Waffle', 'Pancake', 'Brown bear', 'Woodpecker', 'Blue jay', 'Pretzel', 'Bagel', 'Tower', 'Teapot', 'Person', 'Bow and arrow', 'Swimwear', 'Beehive', 'Brassiere', 'Bee', 'Bat (Animal)', 'Starfish', 'Popcorn', 'Burrito', 'Chainsaw', 'Balloon', 'Wrench', 'Tent', 'Vehicle registration plate', 'Lantern', 'Toaster', 'Flashlight', 'Billboard', 'Tiara', 'Limousine', 'Necklace', 'Carnivore', 'Scissors', 'Stairs', 'Computer keyboard', 'Printer', 'Traffic sign', 'Chair', 'Shirt', 'Poster', 'Cheese', 'Sock', 'Fire hydrant', 'Land vehicle', 'Earrings', 'Tie', 'Watercraft', 'Cabinetry', 'Suitcase', 'Muffin', 'Bidet', 'Snack', 'Snowmobile', 'Clock', 'Medical equipment', 'Cattle', 'Cello', 'Jet ski', 'Camel', 'Coat', 'Suit', 'Desk', 'Cat', 'Bronze sculpture', 'Juice', 'Gondola', 'Beetle', 'Cannon', 'Computer mouse', 'Cookie', 'Office building', 'Fountain', 'Coin', 'Calculator', 'Cocktail', 'Computer monitor', 'Box', 'Stapler', 'Christmas tree', 'Cowboy hat', 'Hiking equipment', 'Studio couch', 'Drum', 'Dessert', 'Wine rack', 'Drink', 'Zucchini', 'Ladle', 'Human mouth', 'Dairy Product', 'Dice', 'Oven', 'Dinosaur', 'Ratchet (Device)', 'Couch', 'Cricket ball', 'Winter melon', 'Spatula', 'Whiteboard', 'Pencil sharpener', 'Door', 'Hat', 'Shower', 'Eraser', 'Fedora', 'Guacamole', 'Dagger', 'Scarf', 'Dolphin', 'Sombrero', 'Tin can', 'Mug', 'Tap', 'Harbor seal', 'Stretcher', 'Can opener', 'Goggles', 'Human body', 'Roller skates', 'Coffee cup', 'Cutting board', 'Blender', 'Plumbing fixture', 'Stop sign', 'Office supplies', 'Volleyball (Ball)', 'Vase', 'Slow cooker', 'Wardrobe', 'Coffee', 'Whisk', 'Paper towel', 'Personal care', 'Food', 'Sun hat', 'Tree house', 'Flying disc', 'Skirt', 'Gas stove', 'Salt and pepper shakers', 'Mechanical fan', 'Face powder', 'Fax', 'Fruit', 'French fries', 'Nightstand', 'Barrel', 'Kite', 'Tart', 'Treadmill', 'Fox', 'Flag', 'French horn', 'Window blind', 'Human foot', 'Golf cart', 'Jacket', 'Egg (Food)', 'Street light', 'Guitar', 'Pillow', 'Human leg', 'Isopod', 'Grape', 'Human ear', 'Power plugs and sockets', 'Panda', 'Giraffe', 'Woman', 'Door handle', 'Rhinoceros', 'Bathtub', 
'Goldfish', 'Houseplant', 'Goat', 'Baseball bat', 'Baseball glove', 'Mixing bowl', 'Marine invertebrates', 'Kitchen utensil', 'Light switch', 'House', 'Horse', 'Stationary bicycle', 'Hammer', 'Ceiling fan', 'Sofa bed', 'Adhesive tape', 'Harp', 'Sandal', 'Bicycle helmet', 'Saucer', 'Harpsichord', 'Human hair', 'Heater', 'Harmonica', 'Hamster', 'Curtain', 'Bed', 'Kettle', 'Fireplace', 'Scale', 'Drinking straw', 'Insect', 'Hair dryer', 'Kitchenware', 'Indoor rower', 'Invertebrate', 'Food processor', 'Bookcase', 'Refrigerator', 'Wood-burning stove', 'Punching bag', 'Common fig', 'Cocktail shaker', 'Jaguar (Animal)', 'Golf ball', 'Fashion accessory', 'Alarm clock', 'Filing cabinet', 'Artichoke', 'Table', 'Tableware', 'Kangaroo', 'Koala', 'Knife', 'Bottle', 'Bottle opener', 'Lynx', 'Lavender (Plant)', 'Lighthouse', 'Dumbbell', 'Human head', 'Bowl', 'Humidifier', 'Porch', 'Lizard', 'Billiard table', 'Mammal', 'Mouse', 'Motorcycle', 'Musical instrument', 'Swim cap', 'Frying pan', 'Snowplow', 'Bathroom cabinet', 'Missile', 'Bust', 'Man', 'Waffle iron', 'Milk', 'Ring binder', 'Plate', 'Mobile phone', 'Baked goods', 'Mushroom', 'Crutch', 'Pitcher (Container)', 'Mirror', 'Personal flotation device', 'Table tennis racket', 'Pencil case', 'Musical keyboard', 'Scoreboard', 'Briefcase', 'Kitchen knife', 'Nail (Construction)', 'Tennis ball', 'Plastic bag', 'Oboe', 'Chest of drawers', 'Ostrich', 'Piano', 'Girl', 'Plant', 'Potato', 'Hair spray', 'Sports equipment', 'Pasta', 'Penguin', 'Pumpkin', 'Pear', 'Infant bed', 'Polar bear', 'Mixer', 'Cupboard', 'Jacuzzi', 'Pizza', 'Digital clock', 'Pig', 'Reptile', 'Rifle', 'Lipstick', 'Skateboard', 'Raven', 'High heels', 'Red panda', 'Rose', 'Rabbit', 'Sculpture', 'Saxophone', 'Shotgun', 'Seafood', 'Submarine sandwich', 'Snowboard', 'Sword', 'Picture frame', 'Sushi', 'Loveseat', 'Ski', 'Squirrel', 'Tripod', 'Stethoscope', 'Submarine', 'Scorpion', 'Segway', 'Training bench', 'Snake', 'Coffee table', 'Skyscraper', 'Sheep', 'Television', 'Trombone', 'Tea', 'Tank', 'Taco', 'Telephone', 'Torch', 'Tiger', 'Strawberry', 'Trumpet', 'Tree', 'Tomato', 'Train', 'Tool', 'Picnic basket', 'Cooking spray', 'Trousers', 'Bowling equipment', 'Football helmet', 'Truck', 'Measuring cup', 'Coffeemaker', 'Violin', 'Vehicle', 'Handbag', 'Paper cutter', 'Wine', 'Weapon', 'Wheel', 'Worm', 'Wok', 'Whale', 'Zebra', 'Auto part', 'Jug', 'Pizza cutter', 'Cream', 'Monkey', 'Lion', 'Bread', 'Platter', 'Chicken', 'Eagle', 'Helicopter', 'Owl', 'Duck', 'Turtle', 'Hippopotamus', 'Crocodile', 'Toilet', 'Toilet paper', 'Squid', 'Clothing', 'Footwear', 'Lemon', 'Spider', 'Deer', 'Frog', 'Banana', 'Rocket', 'Wine glass', 'Countertop', 'Tablet computer', 'Waste container', 'Swimming pool', 'Dog', 'Book', 'Elephant', 'Shark', 'Candle', 'Leopard', 'Axe', 'Hand dryer', 'Soap dispenser', 'Porcupine', 'Flower', 'Canary', 'Cheetah', 'Palm tree', 'Hamburger', 'Maple', 'Building', 'Fish', 'Lobster', 'Garden Asparagus', 'Furniture', 'Hedgehog', 'Airplane', 'Spoon', 'Otter', 'Bull', 'Oyster', 'Horizontal bar', 'Convenience store', 'Bomb', 'Bench', 'Ice cream', 'Caterpillar', 'Butterfly', 'Parachute', 'Orange', 'Antelope', 'Beaker', 'Moths and butterflies', 'Window', 'Closet', 'Castle', 'Jellyfish', 'Goose', 'Mule', 'Swan', 'Peach', 'Coconut', 'Seat belt', 'Raccoon', 'Chisel', 'Fork', 'Lamp', 'Camera', 'Squash (Plant)', 'Racket', 'Human face', 'Human arm', 'Vegetable', 'Diaper', 'Unicycle', 'Falcon', 'Chime', 'Snail', 'Shellfish', 'Cabbage', 'Carrot', 'Mango', 'Jeans', 'Flowerpot', 'Pineapple', 'Drawer', 'Stool', 
        'Envelope', 'Cake', 'Dragonfly', 'Common sunflower', 'Microwave oven',
        'Honeycomb', 'Marine mammal', 'Sea lion', 'Ladybug', 'Shelf', 'Watch',
        'Candy', 'Salad', 'Parrot', 'Handgun', 'Sparrow', 'Van', 'Grinder',
        'Spice rack', 'Light bulb', 'Corded phone', 'Sports uniform',
        'Tennis racket', 'Wall clock', 'Serving tray',
        'Kitchen & dining room table', 'Dog bed', 'Cake stand',
        'Cat furniture', 'Bathroom accessory', 'Facial tissue holder',
        'Pressure cooker', 'Kitchen appliance', 'Tire', 'Ruler',
        'Luggage and bags', 'Microphone', 'Broccoli', 'Umbrella', 'Pastry',
        'Grapefruit', 'Band-aid', 'Animal', 'Bell pepper', 'Turkey', 'Lily',
        'Pomegranate', 'Doughnut', 'Glasses', 'Human nose', 'Pen', 'Ant',
        'Car', 'Aircraft', 'Human hand', 'Skunk', 'Teddy bear', 'Watermelon',
        'Cantaloupe', 'Dishwasher', 'Flute', 'Balance beam', 'Sandwich',
        'Shrimp', 'Sewing machine', 'Binoculars', 'Rays and skates', 'Ipod',
        'Accordion', 'Willow', 'Crab', 'Crown', 'Seahorse', 'Perfume',
        'Alpaca', 'Taxi', 'Canoe', 'Remote control', 'Wheelchair',
        'Rugby ball', 'Armadillo', 'Maracas', 'Helmet'
    ]


dataset_aliases = {
    'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'],
    'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],
    'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],
    'coco': ['coco', 'mscoco', 'ms_coco'],
    'wider_face': ['WIDERFaceDataset', 'wider_face', 'WIDERFace'],
    'cityscapes': ['cityscapes'],
    'oid_challenge': ['oid_challenge', 'openimages_challenge'],
    'oid_v6': ['oid_v6', 'openimages_v6']
}


def get_classes(dataset):
    """Get class names of a dataset."""
    alias2name = {}
    for name, aliases in dataset_aliases.items():
        for alias in aliases:
            alias2name[alias] = name

    if mmcv.is_str(dataset):
        if dataset in alias2name:
            # resolve e.g. 'coco' to the coco_classes() function defined above
            labels = eval(alias2name[dataset] + '_classes()')
        else:
            raise ValueError(f'Unrecognized dataset: {dataset}')
    else:
        raise TypeError(f'dataset must be a str, but got {type(dataset)}')
    return labels


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/core/evaluation/eval_hooks.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
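# A sketch of how `dynamic_intervals` is consumed below (the exact kwargs of
# the underlying mmcv hook may differ; treat this as illustrative):
#     EvalHook(dataloader, interval=10, dynamic_intervals=[(80, 5), (95, 1)])
# evaluates every 10 epochs until epoch 80, every 5 epochs until epoch 95,
# and every epoch afterwards: `_calc_dynamic_intervals` turns the tuples into
# milestone/interval lists and `bisect` picks the interval belonging to the
# most recently passed milestone.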
import bisect
import os.path as osp

import mmcv
import torch.distributed as dist
from mmcv.runner import DistEvalHook as BaseDistEvalHook
from mmcv.runner import EvalHook as BaseEvalHook
from torch.nn.modules.batchnorm import _BatchNorm


def _calc_dynamic_intervals(start_interval, dynamic_interval_list):
    assert mmcv.is_list_of(dynamic_interval_list, tuple)

    dynamic_milestones = [0]
    dynamic_milestones.extend(
        [dynamic_interval[0] for dynamic_interval in dynamic_interval_list])
    dynamic_intervals = [start_interval]
    dynamic_intervals.extend(
        [dynamic_interval[1] for dynamic_interval in dynamic_interval_list])
    return dynamic_milestones, dynamic_intervals


class EvalHook(BaseEvalHook):

    def __init__(self, *args, dynamic_intervals=None, **kwargs):
        super(EvalHook, self).__init__(*args, **kwargs)
        self.latest_results = None

        self.use_dynamic_intervals = dynamic_intervals is not None
        if self.use_dynamic_intervals:
            self.dynamic_milestones, self.dynamic_intervals = \
                _calc_dynamic_intervals(self.interval, dynamic_intervals)

    def _decide_interval(self, runner):
        if self.use_dynamic_intervals:
            progress = runner.epoch if self.by_epoch else runner.iter
            step = bisect.bisect(self.dynamic_milestones, (progress + 1))
            # Dynamically modify the evaluation interval
            self.interval = self.dynamic_intervals[step - 1]

    def before_train_epoch(self, runner):
        """Evaluate the model only at the start of training by epoch."""
        self._decide_interval(runner)
        super().before_train_epoch(runner)

    def before_train_iter(self, runner):
        self._decide_interval(runner)
        super().before_train_iter(runner)

    def _do_evaluate(self, runner):
        """Perform evaluation and save the checkpoint."""
        if not self._should_evaluate(runner):
            return

        from mmdet.apis import single_gpu_test

        # Changed results to self.latest_results so that MMDetWandbHook can
        # access the evaluation results and log them to wandb.
        results = single_gpu_test(runner.model, self.dataloader, show=False)
        self.latest_results = results
        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
        key_score = self.evaluate(runner, results)
        # key_score may be `None`, in which case saving the best checkpoint
        # is skipped
        if self.save_best and key_score:
            self._save_ckpt(runner, key_score)


# Note: Considering that MMCV's EvalHook updated its interface in V1.3.16,
# in order to avoid strong version dependency, we did not directly
# inherit EvalHook but BaseDistEvalHook.
class DistEvalHook(BaseDistEvalHook): def __init__(self, *args, dynamic_intervals=None, **kwargs): super(DistEvalHook, self).__init__(*args, **kwargs) self.latest_results = None self.use_dynamic_intervals = dynamic_intervals is not None if self.use_dynamic_intervals: self.dynamic_milestones, self.dynamic_intervals = \ _calc_dynamic_intervals(self.interval, dynamic_intervals) def _decide_interval(self, runner): if self.use_dynamic_intervals: progress = runner.epoch if self.by_epoch else runner.iter step = bisect.bisect(self.dynamic_milestones, (progress + 1)) # Dynamically modify the evaluation interval self.interval = self.dynamic_intervals[step - 1] def before_train_epoch(self, runner): """Evaluate the model only at the start of training by epoch.""" self._decide_interval(runner) super().before_train_epoch(runner) def before_train_iter(self, runner): self._decide_interval(runner) super().before_train_iter(runner) def _do_evaluate(self, runner): """perform evaluation and save ckpt.""" # Synchronization of BatchNorm's buffer (running_mean # and running_var) is not supported in the DDP of pytorch, # which may cause the inconsistent performance of models in # different ranks, so we broadcast BatchNorm's buffers # of rank 0 to other ranks to avoid this. if self.broadcast_bn_buffer: model = runner.model for name, module in model.named_modules(): if isinstance(module, _BatchNorm) and module.track_running_stats: dist.broadcast(module.running_var, 0) dist.broadcast(module.running_mean, 0) if not self._should_evaluate(runner): return tmpdir = self.tmpdir if tmpdir is None: tmpdir = osp.join(runner.work_dir, '.eval_hook') from mmdet.apis import multi_gpu_test # Changed results to self.results so that MMDetWandbHook can access # the evaluation results and log them to wandb. results = multi_gpu_test( runner.model, self.dataloader, tmpdir=tmpdir, gpu_collect=self.gpu_collect) self.latest_results = results if runner.rank == 0: print('\n') runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) key_score = self.evaluate(runner, results) # the key_score may be `None` so it needs to skip # the action to save the best checkpoint if self.save_best and key_score: self._save_ckpt(runner, key_score) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/evaluation/mean_ap.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from multiprocessing import Pool import mmcv import numpy as np from mmcv.utils import print_log from terminaltables import AsciiTable from .bbox_overlaps import bbox_overlaps from .class_names import get_classes def average_precision(recalls, precisions, mode='area'): """Calculate average precision (for single or multiple scales). 
Args: recalls (ndarray): shape (num_scales, num_dets) or (num_dets, ) precisions (ndarray): shape (num_scales, num_dets) or (num_dets, ) mode (str): 'area' or '11points', 'area' means calculating the area under precision-recall curve, '11points' means calculating the average precision of recalls at [0, 0.1, ..., 1] Returns: float or ndarray: calculated average precision """ no_scale = False if recalls.ndim == 1: no_scale = True recalls = recalls[np.newaxis, :] precisions = precisions[np.newaxis, :] assert recalls.shape == precisions.shape and recalls.ndim == 2 num_scales = recalls.shape[0] ap = np.zeros(num_scales, dtype=np.float32) if mode == 'area': zeros = np.zeros((num_scales, 1), dtype=recalls.dtype) ones = np.ones((num_scales, 1), dtype=recalls.dtype) mrec = np.hstack((zeros, recalls, ones)) mpre = np.hstack((zeros, precisions, zeros)) for i in range(mpre.shape[1] - 1, 0, -1): mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i]) for i in range(num_scales): ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0] ap[i] = np.sum( (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1]) elif mode == '11points': for i in range(num_scales): for thr in np.arange(0, 1 + 1e-3, 0.1): precs = precisions[i, recalls[i, :] >= thr] prec = precs.max() if precs.size > 0 else 0 ap[i] += prec ap /= 11 else: raise ValueError( 'Unrecognized mode, only "area" and "11points" are supported') if no_scale: ap = ap[0] return ap def tpfp_imagenet(det_bboxes, gt_bboxes, gt_bboxes_ignore=None, default_iou_thr=0.5, area_ranges=None, use_legacy_coordinate=False, **kwargs): """Check if detected bboxes are true positive or false positive. Args: det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, of shape (k, 4). Default: None default_iou_thr (float): IoU threshold to be considered as matched for medium and large bboxes (small ones have special rules). Default: 0.5. area_ranges (list[tuple] | None): Range of bbox areas to be evaluated, in the format [(min1, max1), (min2, max2), ...]. Default: None. use_legacy_coordinate (bool): Whether to use coordinate system in mmdet v1.x. which means width, height should be calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. Default: False. Returns: tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of each array is (num_scales, m). """ if not use_legacy_coordinate: extra_length = 0. else: extra_length = 1. # an indicator of ignored gts gt_ignore_inds = np.concatenate( (np.zeros(gt_bboxes.shape[0], dtype=np.bool), np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool))) # stack gt_bboxes and gt_bboxes_ignore for convenience gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore)) num_dets = det_bboxes.shape[0] num_gts = gt_bboxes.shape[0] if area_ranges is None: area_ranges = [(None, None)] num_scales = len(area_ranges) # tp and fp are of shape (num_scales, num_gts), each row is tp or fp # of a certain scale. tp = np.zeros((num_scales, num_dets), dtype=np.float32) fp = np.zeros((num_scales, num_dets), dtype=np.float32) if gt_bboxes.shape[0] == 0: if area_ranges == [(None, None)]: fp[...] 
= 1 else: det_areas = ( det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * ( det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length) for i, (min_area, max_area) in enumerate(area_ranges): fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 return tp, fp ious = bbox_overlaps( det_bboxes, gt_bboxes - 1, use_legacy_coordinate=use_legacy_coordinate) gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)), default_iou_thr) # sort all detections by scores in descending order sort_inds = np.argsort(-det_bboxes[:, -1]) for k, (min_area, max_area) in enumerate(area_ranges): gt_covered = np.zeros(num_gts, dtype=bool) # if no area range is specified, gt_area_ignore is all False if min_area is None: gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) else: gt_areas = gt_w * gt_h gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) for i in sort_inds: max_iou = -1 matched_gt = -1 # find best overlapped available gt for j in range(num_gts): # different from PASCAL VOC: allow finding other gts if the # best overlapped ones are already matched by other det bboxes if gt_covered[j]: continue elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou: max_iou = ious[i, j] matched_gt = j # there are 4 cases for a det bbox: # 1. it matches a gt, tp = 1, fp = 0 # 2. it matches an ignored gt, tp = 0, fp = 0 # 3. it matches no gt and within area range, tp = 0, fp = 1 # 4. it matches no gt but is beyond area range, tp = 0, fp = 0 if matched_gt >= 0: gt_covered[matched_gt] = 1 if not (gt_ignore_inds[matched_gt] or gt_area_ignore[matched_gt]): tp[k, i] = 1 elif min_area is None: fp[k, i] = 1 else: bbox = det_bboxes[i, :4] area = (bbox[2] - bbox[0] + extra_length) * ( bbox[3] - bbox[1] + extra_length) if area >= min_area and area < max_area: fp[k, i] = 1 return tp, fp def tpfp_default(det_bboxes, gt_bboxes, gt_bboxes_ignore=None, iou_thr=0.5, area_ranges=None, use_legacy_coordinate=False, **kwargs): """Check if detected bboxes are true positive or false positive. Args: det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, of shape (k, 4). Default: None iou_thr (float): IoU threshold to be considered as matched. Default: 0.5. area_ranges (list[tuple] | None): Range of bbox areas to be evaluated, in the format [(min1, max1), (min2, max2), ...]. Default: None. use_legacy_coordinate (bool): Whether to use coordinate system in mmdet v1.x. which means width, height should be calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. Default: False. Returns: tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of each array is (num_scales, m). """ if not use_legacy_coordinate: extra_length = 0. else: extra_length = 1. 
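    # Matching summary for the loop below: detections are visited in
    # descending score order; a detection whose best IoU with the gts clears
    # `iou_thr` claims that gt as a TP only if the gt is unclaimed and not
    # ignored. Matching an already-claimed gt counts as an FP; matching an
    # ignored gt counts as neither TP nor FP. For example, two detections
    # overlapping the same unclaimed gt above `iou_thr` yield one TP (the
    # higher-scored detection claims the gt first) and one FP.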
# an indicator of ignored gts gt_ignore_inds = np.concatenate( (np.zeros(gt_bboxes.shape[0], dtype=np.bool), np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool))) # stack gt_bboxes and gt_bboxes_ignore for convenience gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore)) num_dets = det_bboxes.shape[0] num_gts = gt_bboxes.shape[0] if area_ranges is None: area_ranges = [(None, None)] num_scales = len(area_ranges) # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of # a certain scale tp = np.zeros((num_scales, num_dets), dtype=np.float32) fp = np.zeros((num_scales, num_dets), dtype=np.float32) # if there is no gt bboxes in this image, then all det bboxes # within area range are false positives if gt_bboxes.shape[0] == 0: if area_ranges == [(None, None)]: fp[...] = 1 else: det_areas = ( det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * ( det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length) for i, (min_area, max_area) in enumerate(area_ranges): fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 return tp, fp ious = bbox_overlaps( det_bboxes, gt_bboxes, use_legacy_coordinate=use_legacy_coordinate) # for each det, the max iou with all gts ious_max = ious.max(axis=1) # for each det, which gt overlaps most with it ious_argmax = ious.argmax(axis=1) # sort all dets in descending order by scores sort_inds = np.argsort(-det_bboxes[:, -1]) for k, (min_area, max_area) in enumerate(area_ranges): gt_covered = np.zeros(num_gts, dtype=bool) # if no area range is specified, gt_area_ignore is all False if min_area is None: gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) else: gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length) * ( gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length) gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) for i in sort_inds: if ious_max[i] >= iou_thr: matched_gt = ious_argmax[i] if not (gt_ignore_inds[matched_gt] or gt_area_ignore[matched_gt]): if not gt_covered[matched_gt]: gt_covered[matched_gt] = True tp[k, i] = 1 else: fp[k, i] = 1 # otherwise ignore this detected bbox, tp = 0, fp = 0 elif min_area is None: fp[k, i] = 1 else: bbox = det_bboxes[i, :4] area = (bbox[2] - bbox[0] + extra_length) * ( bbox[3] - bbox[1] + extra_length) if area >= min_area and area < max_area: fp[k, i] = 1 return tp, fp def tpfp_openimages(det_bboxes, gt_bboxes, gt_bboxes_ignore=None, iou_thr=0.5, area_ranges=None, use_legacy_coordinate=False, gt_bboxes_group_of=None, use_group_of=True, ioa_thr=0.5, **kwargs): """Check if detected bboxes are true positive or false positive. Args: det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, of shape (k, 4). Default: None iou_thr (float): IoU threshold to be considered as matched. Default: 0.5. area_ranges (list[tuple] | None): Range of bbox areas to be evaluated, in the format [(min1, max1), (min2, max2), ...]. Default: None. use_legacy_coordinate (bool): Whether to use coordinate system in mmdet v1.x. which means width, height should be calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. Default: False. gt_bboxes_group_of (ndarray): GT group_of of this image, of shape (k, 1). Default: None use_group_of (bool): Whether to use group of when calculate TP and FP, which only used in OpenImages evaluation. Default: True. ioa_thr (float | None): IoA threshold to be considered as matched, which only used in OpenImages evaluation. Default: 0.5. 
    Returns:
        tuple[np.ndarray]: Returns a tuple (tp, fp, det_bboxes), where
            (tp, fp) have elements 0 and 1, each of shape (num_scales, m),
            and det_bboxes filters out those detections that are matched by
            group-of gts when processing Open Images evaluation. The shape
            is (num_scales, m).
    """
    if not use_legacy_coordinate:
        extra_length = 0.
    else:
        extra_length = 1.

    # an indicator of ignored gts
    gt_ignore_inds = np.concatenate(
        (np.zeros(gt_bboxes.shape[0], dtype=bool),
         np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))
    # stack gt_bboxes and gt_bboxes_ignore for convenience
    gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))

    num_dets = det_bboxes.shape[0]
    num_gts = gt_bboxes.shape[0]
    if area_ranges is None:
        area_ranges = [(None, None)]
    num_scales = len(area_ranges)
    # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of
    # a certain scale
    tp = np.zeros((num_scales, num_dets), dtype=np.float32)
    fp = np.zeros((num_scales, num_dets), dtype=np.float32)

    # if there is no gt bboxes in this image, then all det bboxes
    # within area range are false positives
    if gt_bboxes.shape[0] == 0:
        if area_ranges == [(None, None)]:
            fp[...] = 1
        else:
            det_areas = (
                det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * (
                    det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length)
            for i, (min_area, max_area) in enumerate(area_ranges):
                fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
        return tp, fp, det_bboxes

    if gt_bboxes_group_of is not None and use_group_of:
        # if handling group-of boxes, divide gt boxes into two parts:
        # non-group-of and group-of. Then calculate ious and ioas against
        # non-group-of and group-of gts respectively. This is only used in
        # OpenImages evaluation.
        assert gt_bboxes_group_of.shape[0] == gt_bboxes.shape[0]
        non_group_gt_bboxes = gt_bboxes[~gt_bboxes_group_of]
        group_gt_bboxes = gt_bboxes[gt_bboxes_group_of]
        num_gts_group = group_gt_bboxes.shape[0]
        ious = bbox_overlaps(det_bboxes, non_group_gt_bboxes)
        ioas = bbox_overlaps(det_bboxes, group_gt_bboxes, mode='iof')
    else:
        # if not considering group-of boxes, only calculate ious against
        # gt boxes
        ious = bbox_overlaps(
            det_bboxes, gt_bboxes,
            use_legacy_coordinate=use_legacy_coordinate)
        ioas = None

    if ious.shape[1] > 0:
        # for each det, the max iou with all gts
        ious_max = ious.max(axis=1)
        # for each det, which gt overlaps most with it
        ious_argmax = ious.argmax(axis=1)
        # sort all dets in descending order by scores
        sort_inds = np.argsort(-det_bboxes[:, -1])
        for k, (min_area, max_area) in enumerate(area_ranges):
            gt_covered = np.zeros(num_gts, dtype=bool)
            # if no area range is specified, gt_area_ignore is all False
            if min_area is None:
                gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
            else:
                gt_areas = (
                    gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length) * (
                        gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length)
                gt_area_ignore = (gt_areas < min_area) | (
                    gt_areas >= max_area)
            for i in sort_inds:
                if ious_max[i] >= iou_thr:
                    matched_gt = ious_argmax[i]
                    if not (gt_ignore_inds[matched_gt]
                            or gt_area_ignore[matched_gt]):
                        if not gt_covered[matched_gt]:
                            gt_covered[matched_gt] = True
                            tp[k, i] = 1
                        else:
                            fp[k, i] = 1
                    # otherwise ignore this detected bbox, tp = 0, fp = 0
                elif min_area is None:
                    fp[k, i] = 1
                else:
                    bbox = det_bboxes[i, :4]
                    area = (bbox[2] - bbox[0] + extra_length) * (
                        bbox[3] - bbox[1] + extra_length)
                    if area >= min_area and area < max_area:
                        fp[k, i] = 1
    else:
        # if there are no non-group-of gt bboxes in this image,
        # then all det bboxes within area range are false positives.
        # Only used in OpenImages evaluation.
if area_ranges == [(None, None)]: fp[...] = 1 else: det_areas = ( det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * ( det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length) for i, (min_area, max_area) in enumerate(area_ranges): fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 if ioas is None or ioas.shape[1] <= 0: return tp, fp, det_bboxes else: # The evaluation of group-of TP and FP are done in two stages: # 1. All detections are first matched to non group-of boxes; true # positives are determined. # 2. Detections that are determined as false positives are matched # against group-of boxes and calculated group-of TP and FP. # Only used in OpenImages evaluation. det_bboxes_group = np.zeros( (num_scales, ioas.shape[1], det_bboxes.shape[1]), dtype=float) match_group_of = np.zeros((num_scales, num_dets), dtype=bool) tp_group = np.zeros((num_scales, num_gts_group), dtype=np.float32) ioas_max = ioas.max(axis=1) # for each det, which gt overlaps most with it ioas_argmax = ioas.argmax(axis=1) # sort all dets in descending order by scores sort_inds = np.argsort(-det_bboxes[:, -1]) for k, (min_area, max_area) in enumerate(area_ranges): box_is_covered = tp[k] # if no area range is specified, gt_area_ignore is all False if min_area is None: gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) else: gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * ( gt_bboxes[:, 3] - gt_bboxes[:, 1]) gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) for i in sort_inds: matched_gt = ioas_argmax[i] if not box_is_covered[i]: if ioas_max[i] >= ioa_thr: if not (gt_ignore_inds[matched_gt] or gt_area_ignore[matched_gt]): if not tp_group[k, matched_gt]: tp_group[k, matched_gt] = 1 match_group_of[k, i] = True else: match_group_of[k, i] = True if det_bboxes_group[k, matched_gt, -1] < \ det_bboxes[i, -1]: det_bboxes_group[k, matched_gt] = \ det_bboxes[i] fp_group = (tp_group <= 0).astype(float) tps = [] fps = [] # concatenate tp, fp, and det-boxes which not matched group of # gt boxes and tp_group, fp_group, and det_bboxes_group which # matched group of boxes respectively. for i in range(num_scales): tps.append( np.concatenate((tp[i][~match_group_of[i]], tp_group[i]))) fps.append( np.concatenate((fp[i][~match_group_of[i]], fp_group[i]))) det_bboxes = np.concatenate( (det_bboxes[~match_group_of[i]], det_bboxes_group[i])) tp = np.vstack(tps) fp = np.vstack(fps) return tp, fp, det_bboxes def get_cls_results(det_results, annotations, class_id): """Get det results and gt information of a certain class. Args: det_results (list[list]): Same as `eval_map()`. annotations (list[dict]): Same as `eval_map()`. class_id (int): ID of a specific class. Returns: tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes """ cls_dets = [img_res[class_id] for img_res in det_results] cls_gts = [] cls_gts_ignore = [] for ann in annotations: gt_inds = ann['labels'] == class_id cls_gts.append(ann['bboxes'][gt_inds, :]) if ann.get('labels_ignore', None) is not None: ignore_inds = ann['labels_ignore'] == class_id cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :]) else: cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32)) return cls_dets, cls_gts, cls_gts_ignore def get_cls_group_ofs(annotations, class_id): """Get `gt_group_of` of a certain class, which is used in Open Images. Args: annotations (list[dict]): Same as `eval_map()`. class_id (int): ID of a specific class. Returns: list[np.ndarray]: `gt_group_of` of a certain class. 
""" gt_group_ofs = [] for ann in annotations: gt_inds = ann['labels'] == class_id if ann.get('gt_is_group_ofs', None) is not None: gt_group_ofs.append(ann['gt_is_group_ofs'][gt_inds]) else: gt_group_ofs.append(np.empty((0, 1), dtype=np.bool)) return gt_group_ofs def eval_map(det_results, annotations, scale_ranges=None, iou_thr=0.5, ioa_thr=None, dataset=None, logger=None, tpfp_fn=None, nproc=4, use_legacy_coordinate=False, use_group_of=False): """Evaluate mAP of a dataset. Args: det_results (list[list]): [[cls1_det, cls2_det, ...], ...]. The outer list indicates images, and the inner list indicates per-class detected bboxes. annotations (list[dict]): Ground truth annotations where each item of the list indicates an image. Keys of annotations are: - `bboxes`: numpy array of shape (n, 4) - `labels`: numpy array of shape (n, ) - `bboxes_ignore` (optional): numpy array of shape (k, 4) - `labels_ignore` (optional): numpy array of shape (k, ) scale_ranges (list[tuple] | None): Range of scales to be evaluated, in the format [(min1, max1), (min2, max2), ...]. A range of (32, 64) means the area range between (32**2, 64**2). Default: None. iou_thr (float): IoU threshold to be considered as matched. Default: 0.5. ioa_thr (float | None): IoA threshold to be considered as matched, which only used in OpenImages evaluation. Default: None. dataset (list[str] | str | None): Dataset name or dataset classes, there are minor differences in metrics for different datasets, e.g. "voc07", "imagenet_det", etc. Default: None. logger (logging.Logger | str | None): The way to print the mAP summary. See `mmcv.utils.print_log()` for details. Default: None. tpfp_fn (callable | None): The function used to determine true/ false positives. If None, :func:`tpfp_default` is used as default unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this case). If it is given as a function, then this function is used to evaluate tp & fp. Default None. nproc (int): Processes used for computing TP and FP. Default: 4. use_legacy_coordinate (bool): Whether to use coordinate system in mmdet v1.x. which means width, height should be calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. Default: False. use_group_of (bool): Whether to use group of when calculate TP and FP, which only used in OpenImages evaluation. Default: False. Returns: tuple: (mAP, [dict, dict, ...]) """ assert len(det_results) == len(annotations) if not use_legacy_coordinate: extra_length = 0. else: extra_length = 1. num_imgs = len(det_results) num_scales = len(scale_ranges) if scale_ranges is not None else 1 num_classes = len(det_results[0]) # positive class num area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges] if scale_ranges is not None else None) # There is no need to use multi processes to process # when num_imgs = 1 . if num_imgs > 1: assert nproc > 0, 'nproc must be at least one.' 
nproc = min(nproc, num_imgs) pool = Pool(nproc) eval_results = [] for i in range(num_classes): # get gt and det bboxes of this class cls_dets, cls_gts, cls_gts_ignore = get_cls_results( det_results, annotations, i) # choose proper function according to datasets to compute tp and fp if tpfp_fn is None: if dataset in ['det', 'vid']: tpfp_fn = tpfp_imagenet elif dataset in ['oid_challenge', 'oid_v6'] \ or use_group_of is True: tpfp_fn = tpfp_openimages else: tpfp_fn = tpfp_default if not callable(tpfp_fn): raise ValueError( f'tpfp_fn has to be a function or None, but got {tpfp_fn}') if num_imgs > 1: # compute tp and fp for each image with multiple processes args = [] if use_group_of: # used in Open Images Dataset evaluation gt_group_ofs = get_cls_group_ofs(annotations, i) args.append(gt_group_ofs) args.append([use_group_of for _ in range(num_imgs)]) if ioa_thr is not None: args.append([ioa_thr for _ in range(num_imgs)]) tpfp = pool.starmap( tpfp_fn, zip(cls_dets, cls_gts, cls_gts_ignore, [iou_thr for _ in range(num_imgs)], [area_ranges for _ in range(num_imgs)], [use_legacy_coordinate for _ in range(num_imgs)], *args)) else: tpfp = tpfp_fn( cls_dets[0], cls_gts[0], cls_gts_ignore[0], iou_thr, area_ranges, use_legacy_coordinate, gt_bboxes_group_of=(get_cls_group_ofs(annotations, i)[0] if use_group_of else None), use_group_of=use_group_of, ioa_thr=ioa_thr) tpfp = [tpfp] if use_group_of: tp, fp, cls_dets = tuple(zip(*tpfp)) else: tp, fp = tuple(zip(*tpfp)) # calculate gt number of each scale # ignored gts or gts beyond the specific scale are not counted num_gts = np.zeros(num_scales, dtype=int) for j, bbox in enumerate(cls_gts): if area_ranges is None: num_gts[0] += bbox.shape[0] else: gt_areas = (bbox[:, 2] - bbox[:, 0] + extra_length) * ( bbox[:, 3] - bbox[:, 1] + extra_length) for k, (min_area, max_area) in enumerate(area_ranges): num_gts[k] += np.sum((gt_areas >= min_area) & (gt_areas < max_area)) # sort all det bboxes by score, also sort tp and fp cls_dets = np.vstack(cls_dets) num_dets = cls_dets.shape[0] sort_inds = np.argsort(-cls_dets[:, -1]) tp = np.hstack(tp)[:, sort_inds] fp = np.hstack(fp)[:, sort_inds] # calculate recall and precision with tp and fp tp = np.cumsum(tp, axis=1) fp = np.cumsum(fp, axis=1) eps = np.finfo(np.float32).eps recalls = tp / np.maximum(num_gts[:, np.newaxis], eps) precisions = tp / np.maximum((tp + fp), eps) # calculate AP if scale_ranges is None: recalls = recalls[0, :] precisions = precisions[0, :] num_gts = num_gts.item() mode = 'area' if dataset != 'voc07' else '11points' ap = average_precision(recalls, precisions, mode) eval_results.append({ 'num_gts': num_gts, 'num_dets': num_dets, 'recall': recalls, 'precision': precisions, 'ap': ap }) if num_imgs > 1: pool.close() if scale_ranges is not None: # shape (num_classes, num_scales) all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results]) all_num_gts = np.vstack( [cls_result['num_gts'] for cls_result in eval_results]) mean_ap = [] for i in range(num_scales): if np.any(all_num_gts[:, i] > 0): mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean()) else: mean_ap.append(0.0) else: aps = [] for cls_result in eval_results: if cls_result['num_gts'] > 0: aps.append(cls_result['ap']) mean_ap = np.array(aps).mean().item() if aps else 0.0 print_map_summary( mean_ap, eval_results, dataset, area_ranges, logger=logger) return mean_ap, eval_results def print_map_summary(mean_ap, results, dataset=None, scale_ranges=None, logger=None): """Print mAP and results of each class. 
    A table will be printed to show the gts/dets/recall/AP of each class
    and the mAP.

    Args:
        mean_ap (float): Calculated from `eval_map()`.
        results (list[dict]): Calculated from `eval_map()`.
        dataset (list[str] | str | None): Dataset name or dataset classes.
        scale_ranges (list[tuple] | None): Range of scales to be evaluated.
        logger (logging.Logger | str | None): The way to print the mAP
            summary. See `mmcv.utils.print_log()` for details. Default: None.
    """

    if logger == 'silent':
        return

    if isinstance(results[0]['ap'], np.ndarray):
        num_scales = len(results[0]['ap'])
    else:
        num_scales = 1

    if scale_ranges is not None:
        assert len(scale_ranges) == num_scales

    num_classes = len(results)

    recalls = np.zeros((num_scales, num_classes), dtype=np.float32)
    aps = np.zeros((num_scales, num_classes), dtype=np.float32)
    num_gts = np.zeros((num_scales, num_classes), dtype=int)
    for i, cls_result in enumerate(results):
        if cls_result['recall'].size > 0:
            recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]
        aps[:, i] = cls_result['ap']
        num_gts[:, i] = cls_result['num_gts']

    if dataset is None:
        label_names = [str(i) for i in range(num_classes)]
    elif mmcv.is_str(dataset):
        label_names = get_classes(dataset)
    else:
        label_names = dataset

    if not isinstance(mean_ap, list):
        mean_ap = [mean_ap]

    header = ['class', 'gts', 'dets', 'recall', 'ap']
    for i in range(num_scales):
        if scale_ranges is not None:
            print_log(f'Scale range {scale_ranges[i]}', logger=logger)
        table_data = [header]
        for j in range(num_classes):
            row_data = [
                label_names[j], num_gts[i, j], results[j]['num_dets'],
                f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}'
            ]
            table_data.append(row_data)
        table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}'])
        table = AsciiTable(table_data)
        table.inner_footing_row_border = True
        print_log('\n' + table.table, logger=logger)


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/core/evaluation/panoptic_utils.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
# A custom value to distinguish instance ID and category ID; need to
# be greater than the number of categories.
# For a pixel in the panoptic result map:
#   pan_id = ins_id * INSTANCE_OFFSET + cat_id
INSTANCE_OFFSET = 1000


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/core/evaluation/recall.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
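# Usage sketch for `eval_recalls` below (hypothetical toy arrays; a real
# call would pass per-image gt boxes and scored proposals):
#     import numpy as np
#     gts = [np.array([[10., 10., 50., 50.]]) for _ in range(4)]
#     proposals = [np.array([[12., 11., 49., 52., 0.9]]) for _ in range(4)]
#     recalls = eval_recalls(gts, proposals, proposal_nums=[1, 10, 100],
#                            iou_thrs=[0.5, 0.75])
# `recalls` then has shape (3, 2): one row per proposal budget and one
# column per IoU threshold.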
from collections.abc import Sequence import numpy as np from mmcv.utils import print_log from terminaltables import AsciiTable from .bbox_overlaps import bbox_overlaps def _recalls(all_ious, proposal_nums, thrs): img_num = all_ious.shape[0] total_gt_num = sum([ious.shape[0] for ious in all_ious]) _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32) for k, proposal_num in enumerate(proposal_nums): tmp_ious = np.zeros(0) for i in range(img_num): ious = all_ious[i][:, :proposal_num].copy() gt_ious = np.zeros((ious.shape[0])) if ious.size == 0: tmp_ious = np.hstack((tmp_ious, gt_ious)) continue for j in range(ious.shape[0]): gt_max_overlaps = ious.argmax(axis=1) max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps] gt_idx = max_ious.argmax() gt_ious[j] = max_ious[gt_idx] box_idx = gt_max_overlaps[gt_idx] ious[gt_idx, :] = -1 ious[:, box_idx] = -1 tmp_ious = np.hstack((tmp_ious, gt_ious)) _ious[k, :] = tmp_ious _ious = np.fliplr(np.sort(_ious, axis=1)) recalls = np.zeros((proposal_nums.size, thrs.size)) for i, thr in enumerate(thrs): recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num) return recalls def set_recall_param(proposal_nums, iou_thrs): """Check proposal_nums and iou_thrs and set correct format.""" if isinstance(proposal_nums, Sequence): _proposal_nums = np.array(proposal_nums) elif isinstance(proposal_nums, int): _proposal_nums = np.array([proposal_nums]) else: _proposal_nums = proposal_nums if iou_thrs is None: _iou_thrs = np.array([0.5]) elif isinstance(iou_thrs, Sequence): _iou_thrs = np.array(iou_thrs) elif isinstance(iou_thrs, float): _iou_thrs = np.array([iou_thrs]) else: _iou_thrs = iou_thrs return _proposal_nums, _iou_thrs def eval_recalls(gts, proposals, proposal_nums=None, iou_thrs=0.5, logger=None, use_legacy_coordinate=False): """Calculate recalls. Args: gts (list[ndarray]): a list of arrays of shape (n, 4) proposals (list[ndarray]): a list of arrays of shape (k, 4) or (k, 5) proposal_nums (int | Sequence[int]): Top N proposals to be evaluated. iou_thrs (float | Sequence[float]): IoU thresholds. Default: 0.5. logger (logging.Logger | str | None): The way to print the recall summary. See `mmcv.utils.print_log()` for details. Default: None. use_legacy_coordinate (bool): Whether use coordinate system in mmdet v1.x. "1" was added to both height and width which means w, h should be computed as 'x2 - x1 + 1` and 'y2 - y1 + 1'. Default: False. Returns: ndarray: recalls of different ious and proposal nums """ img_num = len(gts) assert img_num == len(proposals) proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs) all_ious = [] for i in range(img_num): if proposals[i].ndim == 2 and proposals[i].shape[1] == 5: scores = proposals[i][:, 4] sort_idx = np.argsort(scores)[::-1] img_proposal = proposals[i][sort_idx, :] else: img_proposal = proposals[i] prop_num = min(img_proposal.shape[0], proposal_nums[-1]) if gts[i] is None or gts[i].shape[0] == 0: ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32) else: ious = bbox_overlaps( gts[i], img_proposal[:prop_num, :4], use_legacy_coordinate=use_legacy_coordinate) all_ious.append(ious) all_ious = np.array(all_ious) recalls = _recalls(all_ious, proposal_nums, iou_thrs) print_recall_summary(recalls, proposal_nums, iou_thrs, logger=logger) return recalls def print_recall_summary(recalls, proposal_nums, iou_thrs, row_idxs=None, col_idxs=None, logger=None): """Print recalls in a table. 
Args: recalls (ndarray): calculated from `bbox_recalls` proposal_nums (ndarray or list): top N proposals iou_thrs (ndarray or list): iou thresholds row_idxs (ndarray): which rows(proposal nums) to print col_idxs (ndarray): which cols(iou thresholds) to print logger (logging.Logger | str | None): The way to print the recall summary. See `mmcv.utils.print_log()` for details. Default: None. """ proposal_nums = np.array(proposal_nums, dtype=np.int32) iou_thrs = np.array(iou_thrs) if row_idxs is None: row_idxs = np.arange(proposal_nums.size) if col_idxs is None: col_idxs = np.arange(iou_thrs.size) row_header = [''] + iou_thrs[col_idxs].tolist() table_data = [row_header] for i, num in enumerate(proposal_nums[row_idxs]): row = [f'{val:.3f}' for val in recalls[row_idxs[i], col_idxs].tolist()] row.insert(0, num) table_data.append(row) table = AsciiTable(table_data) print_log('\n' + table.table, logger=logger) def plot_num_recall(recalls, proposal_nums): """Plot Proposal_num-Recalls curve. Args: recalls(ndarray or list): shape (k,) proposal_nums(ndarray or list): same shape as `recalls` """ if isinstance(proposal_nums, np.ndarray): _proposal_nums = proposal_nums.tolist() else: _proposal_nums = proposal_nums if isinstance(recalls, np.ndarray): _recalls = recalls.tolist() else: _recalls = recalls import matplotlib.pyplot as plt f = plt.figure() plt.plot([0] + _proposal_nums, [0] + _recalls) plt.xlabel('Proposal num') plt.ylabel('Recall') plt.axis([0, proposal_nums.max(), 0, 1]) f.show() def plot_iou_recall(recalls, iou_thrs): """Plot IoU-Recalls curve. Args: recalls(ndarray or list): shape (k,) iou_thrs(ndarray or list): same shape as `recalls` """ if isinstance(iou_thrs, np.ndarray): _iou_thrs = iou_thrs.tolist() else: _iou_thrs = iou_thrs if isinstance(recalls, np.ndarray): _recalls = recalls.tolist() else: _recalls = recalls import matplotlib.pyplot as plt f = plt.figure() plt.plot(_iou_thrs + [1.0], _recalls + [0.]) plt.xlabel('IoU') plt.ylabel('Recall') plt.axis([iou_thrs.min(), 1, 0, 1]) f.show() ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/export/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .onnx_helper import (add_dummy_nms_for_onnx, dynamic_clip_for_onnx, get_k_for_topk) from .pytorch2onnx import (build_model_from_cfg, generate_inputs_and_wrap_model, preprocess_example_input) __all__ = [ 'build_model_from_cfg', 'generate_inputs_and_wrap_model', 'preprocess_example_input', 'get_k_for_topk', 'add_dummy_nms_for_onnx', 'dynamic_clip_for_onnx' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/export/model_wrappers.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
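# Usage sketch for the wrappers below (hypothetical file name and inputs;
# `one_img`/`one_meta` would typically come from `preprocess_example_input`
# in pytorch2onnx.py):
#     model = ONNXRuntimeDetector('end2end.onnx', class_names=('person',),
#                                 device_id=0)
#     results = model(img=[one_img], img_metas=[[one_meta]],
#                     return_loss=False)
# Each wrapper runs the exported graph in `forward_test` and reuses the
# shared `forward` post-processing to produce standard mmdet bbox results.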
import os.path as osp import warnings import numpy as np import torch from mmdet.core import bbox2result from mmdet.models import BaseDetector class DeployBaseDetector(BaseDetector): """DeployBaseDetector.""" def __init__(self, class_names, device_id): super(DeployBaseDetector, self).__init__() self.CLASSES = class_names self.device_id = device_id def simple_test(self, img, img_metas, **kwargs): raise NotImplementedError('This method is not implemented.') def aug_test(self, imgs, img_metas, **kwargs): raise NotImplementedError('This method is not implemented.') def extract_feat(self, imgs): raise NotImplementedError('This method is not implemented.') def forward_train(self, imgs, img_metas, **kwargs): raise NotImplementedError('This method is not implemented.') def val_step(self, data, optimizer): raise NotImplementedError('This method is not implemented.') def train_step(self, data, optimizer): raise NotImplementedError('This method is not implemented.') def forward_test(self, *, img, img_metas, **kwargs): raise NotImplementedError('This method is not implemented.') def async_simple_test(self, img, img_metas, **kwargs): raise NotImplementedError('This method is not implemented.') def forward(self, img, img_metas, return_loss=True, **kwargs): outputs = self.forward_test(img, img_metas, **kwargs) batch_dets, batch_labels = outputs[:2] batch_masks = outputs[2] if len(outputs) == 3 else None batch_size = img[0].shape[0] img_metas = img_metas[0] results = [] rescale = kwargs.get('rescale', True) for i in range(batch_size): dets, labels = batch_dets[i], batch_labels[i] if rescale: scale_factor = img_metas[i]['scale_factor'] if isinstance(scale_factor, (list, tuple, np.ndarray)): assert len(scale_factor) == 4 scale_factor = np.array(scale_factor)[None, :] # [1,4] dets[:, :4] /= scale_factor if 'border' in img_metas[i]: # offset pixel of the top-left corners between original image # and padded/enlarged image, 'border' is used when exporting # CornerNet and CentripetalNet to onnx x_off = img_metas[i]['border'][2] y_off = img_metas[i]['border'][0] dets[:, [0, 2]] -= x_off dets[:, [1, 3]] -= y_off dets[:, :4] *= (dets[:, :4] > 0).astype(dets.dtype) dets_results = bbox2result(dets, labels, len(self.CLASSES)) if batch_masks is not None: masks = batch_masks[i] img_h, img_w = img_metas[i]['img_shape'][:2] ori_h, ori_w = img_metas[i]['ori_shape'][:2] masks = masks[:, :img_h, :img_w] if rescale: masks = masks.astype(np.float32) masks = torch.from_numpy(masks) masks = torch.nn.functional.interpolate( masks.unsqueeze(0), size=(ori_h, ori_w)) masks = masks.squeeze(0).detach().numpy() if masks.dtype != np.bool: masks = masks >= 0.5 segms_results = [[] for _ in range(len(self.CLASSES))] for j in range(len(dets)): segms_results[labels[j]].append(masks[j]) results.append((dets_results, segms_results)) else: results.append(dets_results) return results class ONNXRuntimeDetector(DeployBaseDetector): """Wrapper for detector's inference with ONNXRuntime.""" def __init__(self, onnx_file, class_names, device_id): super(ONNXRuntimeDetector, self).__init__(class_names, device_id) import onnxruntime as ort # get the custom op path ort_custom_op_path = '' try: from mmcv.ops import get_onnxruntime_op_path ort_custom_op_path = get_onnxruntime_op_path() except (ImportError, ModuleNotFoundError): warnings.warn('If input model has custom op from mmcv, \ you may have to build mmcv with ONNXRuntime from source.') session_options = ort.SessionOptions() # register custom op for onnxruntime if osp.exists(ort_custom_op_path): 
session_options.register_custom_ops_library(ort_custom_op_path) sess = ort.InferenceSession(onnx_file, session_options) providers = ['CPUExecutionProvider'] options = [{}] is_cuda_available = ort.get_device() == 'GPU' if is_cuda_available: providers.insert(0, 'CUDAExecutionProvider') options.insert(0, {'device_id': device_id}) sess.set_providers(providers, options) self.sess = sess self.io_binding = sess.io_binding() self.output_names = [_.name for _ in sess.get_outputs()] self.is_cuda_available = is_cuda_available def forward_test(self, imgs, img_metas, **kwargs): input_data = imgs[0] # set io binding for inputs/outputs device_type = 'cuda' if self.is_cuda_available else 'cpu' if not self.is_cuda_available: input_data = input_data.cpu() self.io_binding.bind_input( name='input', device_type=device_type, device_id=self.device_id, element_type=np.float32, shape=input_data.shape, buffer_ptr=input_data.data_ptr()) for name in self.output_names: self.io_binding.bind_output(name) # run session to get outputs self.sess.run_with_iobinding(self.io_binding) ort_outputs = self.io_binding.copy_outputs_to_cpu() return ort_outputs class TensorRTDetector(DeployBaseDetector): """Wrapper for detector's inference with TensorRT.""" def __init__(self, engine_file, class_names, device_id, output_names=None): super(TensorRTDetector, self).__init__(class_names, device_id) warnings.warn('`output_names` is deprecated and will be removed in ' 'future releases.') from mmcv.tensorrt import TRTWraper, load_tensorrt_plugin try: load_tensorrt_plugin() except (ImportError, ModuleNotFoundError): warnings.warn('If input model has custom op from mmcv, \ you may have to build mmcv with TensorRT from source.') output_names = ['dets', 'labels'] model = TRTWraper(engine_file, ['input'], output_names) with_masks = False # if TensorRT has totally 4 inputs/outputs, then # the detector should have `mask` output. if len(model.engine) == 4: model.output_names = output_names + ['masks'] with_masks = True self.model = model self.with_masks = with_masks def forward_test(self, imgs, img_metas, **kwargs): input_data = imgs[0].contiguous() with torch.cuda.device(self.device_id), torch.no_grad(): outputs = self.model({'input': input_data}) outputs = [outputs[name] for name in self.model.output_names] outputs = [out.detach().cpu().numpy() for out in outputs] return outputs ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/export/onnx_helper.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import os import torch def dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape): """Clip boxes dynamically for onnx. Since torch.clamp cannot have dynamic `min` and `max`, we scale the boxes by 1/max_shape and clamp in the range [0, 1]. Args: x1 (Tensor): The x1 for bounding boxes. y1 (Tensor): The y1 for bounding boxes. x2 (Tensor): The x2 for bounding boxes. y2 (Tensor): The y2 for bounding boxes. max_shape (Tensor or torch.Size): The (H,W) of original image. Returns: tuple(Tensor): The clipped x1, y1, x2, y2. 
""" assert isinstance( max_shape, torch.Tensor), '`max_shape` should be tensor of (h,w) for onnx' # scale by 1/max_shape x1 = x1 / max_shape[1] y1 = y1 / max_shape[0] x2 = x2 / max_shape[1] y2 = y2 / max_shape[0] # clamp [0, 1] x1 = torch.clamp(x1, 0, 1) y1 = torch.clamp(y1, 0, 1) x2 = torch.clamp(x2, 0, 1) y2 = torch.clamp(y2, 0, 1) # scale back x1 = x1 * max_shape[1] y1 = y1 * max_shape[0] x2 = x2 * max_shape[1] y2 = y2 * max_shape[0] return x1, y1, x2, y2 def get_k_for_topk(k, size): """Get k of TopK for onnx exporting. The K of TopK in TensorRT should not be a Tensor, while in ONNX Runtime it could be a Tensor.Due to dynamic shape feature, we have to decide whether to do TopK and what K it should be while exporting to ONNX. If returned K is less than zero, it means we do not have to do TopK operation. Args: k (int or Tensor): The set k value for nms from config file. size (Tensor or torch.Size): The number of elements of \ TopK's input tensor Returns: tuple: (int or Tensor): The final K for TopK. """ ret_k = -1 if k <= 0 or size <= 0: return ret_k if torch.onnx.is_in_onnx_export(): is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT' if is_trt_backend: # TensorRT does not support dynamic K with TopK op if 0 < k < size: ret_k = k else: # Always keep topk op for dynamic input in onnx for ONNX Runtime ret_k = torch.where(k < size, k, size) elif k < size: ret_k = k else: # ret_k is -1 pass return ret_k def add_dummy_nms_for_onnx(boxes, scores, max_output_boxes_per_class=1000, iou_threshold=0.5, score_threshold=0.05, pre_top_k=-1, after_top_k=-1, labels=None): """Create a dummy onnx::NonMaxSuppression op while exporting to ONNX. This function helps exporting to onnx with batch and multiclass NMS op. It only supports class-agnostic detection results. That is, the scores is of shape (N, num_bboxes, num_classes) and the boxes is of shape (N, num_boxes, 4). Args: boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4] scores (Tensor): The detection scores of shape [N, num_boxes, num_classes] max_output_boxes_per_class (int): Maximum number of output boxes per class of nms. Defaults to 1000. iou_threshold (float): IOU threshold of nms. Defaults to 0.5 score_threshold (float): score threshold of nms. Defaults to 0.05. pre_top_k (bool): Number of top K boxes to keep before nms. Defaults to -1. after_top_k (int): Number of top K boxes to keep after nms. Defaults to -1. labels (Tensor, optional): It not None, explicit labels would be used. Otherwise, labels would be automatically generated using num_classed. Defaults to None. Returns: tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] and class labels of shape [N, num_det]. 
""" max_output_boxes_per_class = torch.LongTensor([max_output_boxes_per_class]) iou_threshold = torch.tensor([iou_threshold], dtype=torch.float32) score_threshold = torch.tensor([score_threshold], dtype=torch.float32) batch_size = scores.shape[0] num_class = scores.shape[2] nms_pre = torch.tensor(pre_top_k, device=scores.device, dtype=torch.long) nms_pre = get_k_for_topk(nms_pre, boxes.shape[1]) if nms_pre > 0: max_scores, _ = scores.max(-1) _, topk_inds = max_scores.topk(nms_pre) batch_inds = torch.arange(batch_size).view( -1, 1).expand_as(topk_inds).long() # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 transformed_inds = boxes.shape[1] * batch_inds + topk_inds boxes = boxes.reshape(-1, 4)[transformed_inds, :].reshape( batch_size, -1, 4) scores = scores.reshape(-1, num_class)[transformed_inds, :].reshape( batch_size, -1, num_class) if labels is not None: labels = labels.reshape(-1, 1)[transformed_inds].reshape( batch_size, -1) scores = scores.permute(0, 2, 1) num_box = boxes.shape[1] # turn off tracing to create a dummy output of nms state = torch._C._get_tracing_state() # dummy indices of nms's output num_fake_det = 2 batch_inds = torch.randint(batch_size, (num_fake_det, 1)) cls_inds = torch.randint(num_class, (num_fake_det, 1)) box_inds = torch.randint(num_box, (num_fake_det, 1)) indices = torch.cat([batch_inds, cls_inds, box_inds], dim=1) output = indices setattr(DummyONNXNMSop, 'output', output) # open tracing torch._C._set_tracing_state(state) selected_indices = DummyONNXNMSop.apply(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold) batch_inds, cls_inds = selected_indices[:, 0], selected_indices[:, 1] box_inds = selected_indices[:, 2] if labels is None: labels = torch.arange(num_class, dtype=torch.long).to(scores.device) labels = labels.view(1, num_class, 1).expand_as(scores) scores = scores.reshape(-1, 1) boxes = boxes.reshape(batch_size, -1).repeat(1, num_class).reshape(-1, 4) pos_inds = (num_class * batch_inds + cls_inds) * num_box + box_inds mask = scores.new_zeros(scores.shape) # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 # PyTorch style code: mask[batch_inds, box_inds] += 1 mask[pos_inds, :] += 1 scores = scores * mask boxes = boxes * mask scores = scores.reshape(batch_size, -1) boxes = boxes.reshape(batch_size, -1, 4) labels = labels.reshape(batch_size, -1) nms_after = torch.tensor( after_top_k, device=scores.device, dtype=torch.long) nms_after = get_k_for_topk(nms_after, num_box * num_class) if nms_after > 0: _, topk_inds = scores.topk(nms_after) batch_inds = torch.arange(batch_size).view(-1, 1).expand_as(topk_inds) # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 transformed_inds = scores.shape[1] * batch_inds + topk_inds scores = scores.reshape(-1, 1)[transformed_inds, :].reshape( batch_size, -1) boxes = boxes.reshape(-1, 4)[transformed_inds, :].reshape( batch_size, -1, 4) labels = labels.reshape(-1, 1)[transformed_inds, :].reshape( batch_size, -1) scores = scores.unsqueeze(2) dets = torch.cat([boxes, scores], dim=2) return dets, labels class DummyONNXNMSop(torch.autograd.Function): """DummyONNXNMSop. This class is only for creating onnx::NonMaxSuppression. 
""" @staticmethod def forward(ctx, boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold): return DummyONNXNMSop.output @staticmethod def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold): return g.op( 'NonMaxSuppression', boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, outputs=1) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/export/pytorch2onnx.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from functools import partial import mmcv import numpy as np import torch from mmcv.runner import load_checkpoint def generate_inputs_and_wrap_model(config_path, checkpoint_path, input_config, cfg_options=None): """Prepare sample input and wrap model for ONNX export. The ONNX export API only accept args, and all inputs should be torch.Tensor or corresponding types (such as tuple of tensor). So we should call this function before exporting. This function will: 1. generate corresponding inputs which are used to execute the model. 2. Wrap the model's forward function. For example, the MMDet models' forward function has a parameter ``return_loss:bool``. As we want to set it as False while export API supports neither bool type or kwargs. So we have to replace the forward method like ``model.forward = partial(model.forward, return_loss=False)``. Args: config_path (str): the OpenMMLab config for the model we want to export to ONNX checkpoint_path (str): Path to the corresponding checkpoint input_config (dict): the exactly data in this dict depends on the framework. For MMSeg, we can just declare the input shape, and generate the dummy data accordingly. However, for MMDet, we may pass the real img path, or the NMS will return None as there is no legal bbox. Returns: tuple: (model, tensor_data) wrapped model which can be called by ``model(*tensor_data)`` and a list of inputs which are used to execute the model while exporting. """ model = build_model_from_cfg( config_path, checkpoint_path, cfg_options=cfg_options) one_img, one_meta = preprocess_example_input(input_config) tensor_data = [one_img] model.forward = partial( model.forward, img_metas=[[one_meta]], return_loss=False) # pytorch has some bug in pytorch1.3, we have to fix it # by replacing these existing op opset_version = 11 # put the import within the function thus it will not cause import error # when not using this function try: from mmcv.onnx.symbolic import register_extra_symbolics except ModuleNotFoundError: raise NotImplementedError('please update mmcv to version>=v1.0.4') register_extra_symbolics(opset_version) return model, tensor_data def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None): """Build a model from config and load the given checkpoint. 
Args: config_path (str): the OpenMMLab config for the model we want to export to ONNX checkpoint_path (str): Path to the corresponding checkpoint Returns: torch.nn.Module: the built model """ from mmdet.models import build_detector cfg = mmcv.Config.fromfile(config_path) if cfg_options is not None: cfg.merge_from_dict(cfg_options) # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True cfg.model.pretrained = None cfg.data.test.test_mode = True # build the model cfg.model.train_cfg = None model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) checkpoint = load_checkpoint(model, checkpoint_path, map_location='cpu') if 'CLASSES' in checkpoint.get('meta', {}): model.CLASSES = checkpoint['meta']['CLASSES'] else: from mmdet.datasets import DATASETS dataset = DATASETS.get(cfg.data.test['type']) assert (dataset is not None) model.CLASSES = dataset.CLASSES model.cpu().eval() return model def preprocess_example_input(input_config): """Prepare an example input image for ``generate_inputs_and_wrap_model``. Args: input_config (dict): customized config describing the example input. Returns: tuple: (one_img, one_meta), tensor of the example input image and \ meta information for the example input image. Examples: >>> from mmdet.core.export import preprocess_example_input >>> input_config = { >>> 'input_shape': (1,3,224,224), >>> 'input_path': 'demo/demo.jpg', >>> 'normalize_cfg': { >>> 'mean': (123.675, 116.28, 103.53), >>> 'std': (58.395, 57.12, 57.375) >>> } >>> } >>> one_img, one_meta = preprocess_example_input(input_config) >>> print(one_img.shape) torch.Size([1, 3, 224, 224]) >>> print(one_meta) {'img_shape': (224, 224, 3), 'ori_shape': (224, 224, 3), 'pad_shape': (224, 224, 3), 'filename': '.png', 'scale_factor': 1.0, 'flip': False} """ input_path = input_config['input_path'] input_shape = input_config['input_shape'] one_img = mmcv.imread(input_path) one_img = mmcv.imresize(one_img, input_shape[2:][::-1]) show_img = one_img.copy() if 'normalize_cfg' in input_config.keys(): normalize_cfg = input_config['normalize_cfg'] mean = np.array(normalize_cfg['mean'], dtype=np.float32) std = np.array(normalize_cfg['std'], dtype=np.float32) to_rgb = normalize_cfg.get('to_rgb', True) one_img = mmcv.imnormalize(one_img, mean, std, to_rgb=to_rgb) one_img = one_img.transpose(2, 0, 1) one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_( True) (_, C, H, W) = input_shape one_meta = { 'img_shape': (H, W, C), 'ori_shape': (H, W, C), 'pad_shape': (H, W, C), 'filename': '.png', 'scale_factor': np.ones(4, dtype=np.float32), 'flip': False, 'show_img': show_img, 'flip_direction': None } return one_img, one_meta ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
from .checkloss_hook import CheckInvalidLossHook from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook from .memory_profiler_hook import MemoryProfilerHook from .set_epoch_info_hook import SetEpochInfoHook from .sync_norm_hook import SyncNormHook from .sync_random_size_hook import SyncRandomSizeHook from .wandblogger_hook import MMDetWandbHook from .yolox_lrupdater_hook import YOLOXLrUpdaterHook from .yolox_mode_switch_hook import YOLOXModeSwitchHook __all__ = [ 'SyncRandomSizeHook', 'YOLOXModeSwitchHook', 'SyncNormHook', 'ExpMomentumEMAHook', 'LinearMomentumEMAHook', 'YOLOXLrUpdaterHook', 'CheckInvalidLossHook', 'SetEpochInfoHook', 'MemoryProfilerHook', 'MMDetWandbHook' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/checkloss_hook.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.runner.hooks import HOOKS, Hook @HOOKS.register_module() class CheckInvalidLossHook(Hook): """Check invalid loss hook. This hook will regularly check whether the loss is valid during training. Args: interval (int): Checking interval (every k iterations). Default: 50. """ def __init__(self, interval=50): self.interval = interval def after_train_iter(self, runner): if self.every_n_iters(runner, self.interval): assert torch.isfinite(runner.outputs['loss']), \ 'loss became infinite or NaN!' ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/ema.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import math from mmcv.parallel import is_module_wrapper from mmcv.runner.hooks import HOOKS, Hook class BaseEMAHook(Hook): """Exponential Moving Average Hook. Use Exponential Moving Average on all parameters of the model in the training process. All parameters have an EMA backup, which is updated by the formula below. EMAHook takes priority over EvalHook and CheckpointHook. Note that after training, the original model parameters are actually saved in the EMA fields. Args: momentum (float): The momentum used for updating the ema parameters. EMA parameters are updated with the formula: `ema_param = (1-momentum) * ema_param + momentum * cur_param`. Defaults to 0.0002. skip_buffers (bool): Whether to skip the model buffers, such as batchnorm running stats (running_mean, running_var), for which the ema operation is not performed. Defaults to False. interval (int): Update ema parameter every interval iteration. Defaults to 1. resume_from (str, optional): The checkpoint path. Defaults to None. momentum_fun (func, optional): The function to change momentum during early iterations (also warmup) to help early training. If None, `momentum` is used as a constant. Defaults to None. """ def __init__(self, momentum=0.0002, interval=1, skip_buffers=False, resume_from=None, momentum_fun=None): assert 0 < momentum < 1 self.momentum = momentum self.skip_buffers = skip_buffers self.interval = interval self.checkpoint = resume_from self.momentum_fun = momentum_fun def before_run(self, runner): """To resume the model with its ema parameters more friendly. Register ema parameters as ``named_buffer`` to the model. """ model = runner.model if is_module_wrapper(model): model = model.module self.param_ema_buffer = {} if self.skip_buffers: self.model_parameters = dict(model.named_parameters()) else: self.model_parameters = model.state_dict() for name, value in self.model_parameters.items(): # "."
is not allowed in module's buffer name buffer_name = f"ema_{name.replace('.', '_')}" self.param_ema_buffer[name] = buffer_name model.register_buffer(buffer_name, value.data.clone()) self.model_buffers = dict(model.named_buffers()) if self.checkpoint is not None: runner.resume(self.checkpoint) def get_momentum(self, runner): return self.momentum_fun(runner.iter) if self.momentum_fun else \ self.momentum def after_train_iter(self, runner): """Update ema parameter every self.interval iterations.""" if (runner.iter + 1) % self.interval != 0: return momentum = self.get_momentum(runner) for name, parameter in self.model_parameters.items(): # exclude num_tracking if parameter.dtype.is_floating_point: buffer_name = self.param_ema_buffer[name] buffer_parameter = self.model_buffers[buffer_name] buffer_parameter.mul_(1 - momentum).add_( parameter.data, alpha=momentum) def after_train_epoch(self, runner): """We load parameter values from ema backup to model before the EvalHook.""" self._swap_ema_parameters() def before_train_epoch(self, runner): """We recover model's parameter from ema backup after last epoch's EvalHook.""" self._swap_ema_parameters() def _swap_ema_parameters(self): """Swap the parameter of model with parameter in ema_buffer.""" for name, value in self.model_parameters.items(): temp = value.data.clone() ema_buffer = self.model_buffers[self.param_ema_buffer[name]] value.data.copy_(ema_buffer.data) ema_buffer.data.copy_(temp) @HOOKS.register_module() class ExpMomentumEMAHook(BaseEMAHook): """EMAHook using exponential momentum strategy. Args: total_iter (int): The total number of iterations of EMA momentum. Defaults to 2000. """ def __init__(self, total_iter=2000, **kwargs): super(ExpMomentumEMAHook, self).__init__(**kwargs) self.momentum_fun = lambda x: (1 - self.momentum) * math.exp(-( 1 + x) / total_iter) + self.momentum @HOOKS.register_module() class LinearMomentumEMAHook(BaseEMAHook): """EMAHook using linear momentum strategy. Args: warm_up (int): During first warm_up steps, we may use smaller decay to update ema parameters more slowly. Defaults to 100. """ def __init__(self, warm_up=100, **kwargs): super(LinearMomentumEMAHook, self).__init__(**kwargs) self.momentum_fun = lambda x: min(self.momentum**self.interval, (1 + x) / (warm_up + x)) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/memory_profiler_hook.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from mmcv.runner.hooks import HOOKS, Hook @HOOKS.register_module() class MemoryProfilerHook(Hook): """Memory profiler hook recording memory information including virtual memory, swap memory, and the memory of the current process. Args: interval (int): Checking interval (every k iterations). Default: 50. 
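Example (a minimal config sketch, assuming the standard ``custom_hooks`` mechanism of mmdet configs):
    >>> custom_hooks = [dict(type='MemoryProfilerHook', interval=50)]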
""" def __init__(self, interval=50): try: from psutil import swap_memory, virtual_memory self._swap_memory = swap_memory self._virtual_memory = virtual_memory except ImportError: raise ImportError('psutil is not installed, please install it by: ' 'pip install psutil') try: from memory_profiler import memory_usage self._memory_usage = memory_usage except ImportError: raise ImportError( 'memory_profiler is not installed, please install it by: ' 'pip install memory_profiler') self.interval = interval def after_iter(self, runner): if self.every_n_iters(runner, self.interval): # in Byte virtual_memory = self._virtual_memory() swap_memory = self._swap_memory() # in MB process_memory = self._memory_usage()[0] factor = 1024 * 1024 runner.logger.info( 'Memory information ' 'available_memory: ' f'{round(virtual_memory.available / factor)} MB, ' 'used_memory: ' f'{round(virtual_memory.used / factor)} MB, ' f'memory_utilization: {virtual_memory.percent} %, ' 'available_swap_memory: ' f'{round((swap_memory.total - swap_memory.used) / factor)}' ' MB, ' f'used_swap_memory: {round(swap_memory.used / factor)} MB, ' f'swap_memory_utilization: {swap_memory.percent} %, ' 'current_process_memory: ' f'{round(process_memory)} MB') ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/set_epoch_info_hook.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from mmcv.parallel import is_module_wrapper from mmcv.runner import HOOKS, Hook @HOOKS.register_module() class SetEpochInfoHook(Hook): """Set runner's epoch information to the model.""" def before_train_epoch(self, runner): epoch = runner.epoch model = runner.model if is_module_wrapper(model): model = model.module model.set_epoch(epoch) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/sync_norm_hook.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from collections import OrderedDict from mmcv.runner import get_dist_info from mmcv.runner.hooks import HOOKS, Hook from torch import nn from ..utils.dist_utils import all_reduce_dict def get_norm_states(module): async_norm_states = OrderedDict() for name, child in module.named_modules(): if isinstance(child, nn.modules.batchnorm._NormBase): for k, v in child.state_dict().items(): async_norm_states['.'.join([name, k])] = v return async_norm_states @HOOKS.register_module() class SyncNormHook(Hook): """Synchronize Norm states after training epoch, currently used in YOLOX. Args: num_last_epochs (int): The number of latter epochs in the end of the training to switch to synchronizing norm interval. Default: 15. interval (int): Synchronizing norm interval. Default: 1. """ def __init__(self, num_last_epochs=15, interval=1): self.interval = interval self.num_last_epochs = num_last_epochs def before_train_epoch(self, runner): epoch = runner.epoch if (epoch + 1) == runner.max_epochs - self.num_last_epochs: # Synchronize norm every epoch. 
self.interval = 1 def after_train_epoch(self, runner): """Synchronizing norm.""" epoch = runner.epoch module = runner.model if (epoch + 1) % self.interval == 0: _, world_size = get_dist_info() if world_size == 1: return norm_states = get_norm_states(module) if len(norm_states) == 0: return norm_states = all_reduce_dict(norm_states, op='mean') module.load_state_dict(norm_states, strict=False) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/sync_random_size_hook.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import random import warnings import torch from mmcv.runner import get_dist_info from mmcv.runner.hooks import HOOKS, Hook from torch import distributed as dist @HOOKS.register_module() class SyncRandomSizeHook(Hook): """Change and synchronize the random image size across ranks. SyncRandomSizeHook is deprecated, please use Resize pipeline to achieve similar functions. Such as `dict(type='Resize', img_scale=[(448, 448), (832, 832)], multiscale_mode='range', keep_ratio=True)`. Note: Due to the multi-process dataloader, its behavior is different from YOLOX's official implementation, the official is to change the size every fixed iteration interval and what we achieved is a fixed epoch interval. Args: ratio_range (tuple[int]): Random ratio range. It will be multiplied by 32, and then change the dataset output image size. Default: (14, 26). img_scale (tuple[int]): Size of input image. Default: (640, 640). interval (int): The epoch interval of change image size. Default: 1. device (torch.device | str): device for returned tensors. Default: 'cuda'. """ def __init__(self, ratio_range=(14, 26), img_scale=(640, 640), interval=1, device='cuda'): warnings.warn('DeprecationWarning: SyncRandomSizeHook is deprecated. ' 'Please use Resize pipeline to achieve similar ' 'functions. Due to the multi-process dataloader, ' 'its behavior is different from YOLOX\'s official ' 'implementation, the official is to change the size ' 'every fixed iteration interval and what we achieved ' 'is a fixed epoch interval.') self.rank, world_size = get_dist_info() self.is_distributed = world_size > 1 self.ratio_range = ratio_range self.img_scale = img_scale self.interval = interval self.device = device def after_train_epoch(self, runner): """Change the dataset output image size.""" if self.ratio_range is not None and (runner.epoch + 1) % self.interval == 0: # Due to DDP and DP get the device behavior inconsistent, # so we did not get the device from runner.model. tensor = torch.LongTensor(2).to(self.device) if self.rank == 0: size_factor = self.img_scale[1] * 1. / self.img_scale[0] size = random.randint(*self.ratio_range) size = (int(32 * size), 32 * int(size * size_factor)) tensor[0] = size[0] tensor[1] = size[1] if self.is_distributed: dist.barrier() dist.broadcast(tensor, 0) runner.data_loader.dataset.update_dynamic_scale( (tensor[0].item(), tensor[1].item())) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/wandblogger_hook.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
import importlib import os.path as osp import sys import warnings import mmcv import numpy as np import pycocotools.mask as mask_util from mmcv.runner import HOOKS from mmcv.runner.dist_utils import master_only from mmcv.runner.hooks.checkpoint import CheckpointHook from mmcv.runner.hooks.logger.wandb import WandbLoggerHook from mmcv.utils import digit_version from mmdet.core import DistEvalHook, EvalHook from mmdet.core.mask.structures import polygon_to_bitmap @HOOKS.register_module() class MMDetWandbHook(WandbLoggerHook): """Enhanced Wandb logger hook for MMDetection. Comparing with the :cls:`mmcv.runner.WandbLoggerHook`, this hook can not only automatically log all the metrics but also log the following extra information - saves model checkpoints as W&B Artifact, and logs model prediction as interactive W&B Tables. - Metrics: The MMDetWandbHook will automatically log training and validation metrics along with system metrics (CPU/GPU). - Checkpointing: If `log_checkpoint` is True, the checkpoint saved at every checkpoint interval will be saved as W&B Artifacts. This depends on the : class:`mmcv.runner.CheckpointHook` whose priority is higher than this hook. Please refer to https://docs.wandb.ai/guides/artifacts/model-versioning to learn more about model versioning with W&B Artifacts. - Checkpoint Metadata: If evaluation results are available for a given checkpoint artifact, it will have a metadata associated with it. The metadata contains the evaluation metrics computed on validation data with that checkpoint along with the current epoch. It depends on `EvalHook` whose priority is more than MMDetWandbHook. - Evaluation: At every evaluation interval, the `MMDetWandbHook` logs the model prediction as interactive W&B Tables. The number of samples logged is given by `num_eval_images`. Currently, the `MMDetWandbHook` logs the predicted bounding boxes along with the ground truth at every evaluation interval. This depends on the `EvalHook` whose priority is more than `MMDetWandbHook`. Also note that the data is just logged once and subsequent evaluation tables uses reference to the logged data to save memory usage. Please refer to https://docs.wandb.ai/guides/data-vis to learn more about W&B Tables. For more details check out W&B's MMDetection docs: https://docs.wandb.ai/guides/integrations/mmdetection ``` Example: log_config = dict( ... hooks=[ ..., dict(type='MMDetWandbHook', init_kwargs={ 'entity': "YOUR_ENTITY", 'project': "YOUR_PROJECT_NAME" }, interval=50, log_checkpoint=True, log_checkpoint_metadata=True, num_eval_images=100, bbox_score_thr=0.3) ]) ``` Args: init_kwargs (dict): A dict passed to wandb.init to initialize a W&B run. Please refer to https://docs.wandb.ai/ref/python/init for possible key-value pairs. interval (int): Logging interval (every k iterations). Defaults to 50. log_checkpoint (bool): Save the checkpoint at every checkpoint interval as W&B Artifacts. Use this for model versioning where each version is a checkpoint. Defaults to False. log_checkpoint_metadata (bool): Log the evaluation metrics computed on the validation data with the checkpoint, along with current epoch as a metadata to that checkpoint. Defaults to True. num_eval_images (int): The number of validation images to be logged. If zero, the evaluation won't be logged. Defaults to 100. bbox_score_thr (float): Threshold for bounding box scores. Defaults to 0.3. 
""" def __init__(self, init_kwargs=None, interval=50, log_checkpoint=False, log_checkpoint_metadata=False, num_eval_images=100, bbox_score_thr=0.3, **kwargs): super(MMDetWandbHook, self).__init__(init_kwargs, interval, **kwargs) self.log_checkpoint = log_checkpoint self.log_checkpoint_metadata = ( log_checkpoint and log_checkpoint_metadata) self.num_eval_images = num_eval_images self.bbox_score_thr = bbox_score_thr self.log_evaluation = (num_eval_images > 0) self.ckpt_hook: CheckpointHook = None self.eval_hook: EvalHook = None def import_wandb(self): try: import wandb from wandb import init # noqa # Fix ResourceWarning when calling wandb.log in wandb v0.12.10. # https://github.com/wandb/client/issues/2837 if digit_version(wandb.__version__) < digit_version('0.12.10'): warnings.warn( f'The current wandb {wandb.__version__} is ' f'lower than v0.12.10 will cause ResourceWarning ' f'when calling wandb.log, Please run ' f'"pip install --upgrade wandb"') except ImportError: raise ImportError( 'Please run "pip install "wandb>=0.12.10"" to install wandb') self.wandb = wandb @master_only def before_run(self, runner): super(MMDetWandbHook, self).before_run(runner) # Save and Log config. if runner.meta is not None and runner.meta.get('exp_name', None) is not None: src_cfg_path = osp.join(runner.work_dir, runner.meta.get('exp_name', None)) if osp.exists(src_cfg_path): self.wandb.save(src_cfg_path, base_path=runner.work_dir) self._update_wandb_config(runner) else: runner.logger.warning('No meta information found in the runner. ') # Inspect CheckpointHook and EvalHook for hook in runner.hooks: if isinstance(hook, CheckpointHook): self.ckpt_hook = hook if isinstance(hook, (EvalHook, DistEvalHook)): self.eval_hook = hook # Check conditions to log checkpoint if self.log_checkpoint: if self.ckpt_hook is None: self.log_checkpoint = False self.log_checkpoint_metadata = False runner.logger.warning( 'To log checkpoint in MMDetWandbHook, `CheckpointHook` is' 'required, please check hooks in the runner.') else: self.ckpt_interval = self.ckpt_hook.interval # Check conditions to log evaluation if self.log_evaluation or self.log_checkpoint_metadata: if self.eval_hook is None: self.log_evaluation = False self.log_checkpoint_metadata = False runner.logger.warning( 'To log evaluation or checkpoint metadata in ' 'MMDetWandbHook, `EvalHook` or `DistEvalHook` in mmdet ' 'is required, please check whether the validation ' 'is enabled.') else: self.eval_interval = self.eval_hook.interval self.val_dataset = self.eval_hook.dataloader.dataset # Determine the number of samples to be logged. if self.num_eval_images > len(self.val_dataset): self.num_eval_images = len(self.val_dataset) runner.logger.warning( f'The num_eval_images ({self.num_eval_images}) is ' 'greater than the total number of validation samples ' f'({len(self.val_dataset)}). The complete validation ' 'dataset will be logged.') # Check conditions to log checkpoint metadata if self.log_checkpoint_metadata: assert self.ckpt_interval % self.eval_interval == 0, \ 'To log checkpoint metadata in MMDetWandbHook, the interval ' \ f'of checkpoint saving ({self.ckpt_interval}) should be ' \ 'divisible by the interval of evaluation ' \ f'({self.eval_interval}).' 
# Initialize evaluation table if self.log_evaluation: # Initialize data table self._init_data_table() # Add data to the data table self._add_ground_truth(runner) # Log ground truth data self._log_data_table() @master_only def after_train_epoch(self, runner): super(MMDetWandbHook, self).after_train_epoch(runner) if not self.by_epoch: return # Log checkpoint and metadata. if (self.log_checkpoint and self.every_n_epochs(runner, self.ckpt_interval) or (self.ckpt_hook.save_last and self.is_last_epoch(runner))): if self.log_checkpoint_metadata and self.eval_hook: metadata = { 'epoch': runner.epoch + 1, **self._get_eval_results() } else: metadata = None aliases = [f'epoch_{runner.epoch + 1}', 'latest'] model_path = osp.join(self.ckpt_hook.out_dir, f'epoch_{runner.epoch + 1}.pth') self._log_ckpt_as_artifact(model_path, aliases, metadata) # Save prediction table if self.log_evaluation and self.eval_hook._should_evaluate(runner): results = self.eval_hook.latest_results # Initialize evaluation table self._init_pred_table() # Log predictions self._log_predictions(results) # Log the table self._log_eval_table(runner.epoch + 1) @master_only def after_train_iter(self, runner): if self.get_mode(runner) == 'train': # An ugly patch. The iter-based eval hook will call the # `after_train_iter` method of all logger hooks before evaluation. # Use this trick to skip that call. # Don't call super method at first, it will clear the log_buffer return super(MMDetWandbHook, self).after_train_iter(runner) else: super(MMDetWandbHook, self).after_train_iter(runner) if self.by_epoch: return # Save checkpoint and metadata if (self.log_checkpoint and self.every_n_iters(runner, self.ckpt_interval) or (self.ckpt_hook.save_last and self.is_last_iter(runner))): if self.log_checkpoint_metadata and self.eval_hook: metadata = { 'iter': runner.iter + 1, **self._get_eval_results() } else: metadata = None aliases = [f'iter_{runner.iter + 1}', 'latest'] model_path = osp.join(self.ckpt_hook.out_dir, f'iter_{runner.iter + 1}.pth') self._log_ckpt_as_artifact(model_path, aliases, metadata) # Save prediction table if self.log_evaluation and self.eval_hook._should_evaluate(runner): results = self.eval_hook.latest_results # Initialize evaluation table self._init_pred_table() # Log predictions self._log_predictions(results) # Log the table self._log_eval_table(runner.iter + 1) @master_only def after_run(self, runner): self.wandb.finish() def _update_wandb_config(self, runner): """Update wandb config.""" # Import the config file. sys.path.append(runner.work_dir) config_filename = runner.meta['exp_name'][:-3] configs = importlib.import_module(config_filename) # Prepare a nested dict of config variables. config_keys = [key for key in dir(configs) if not key.startswith('__')] config_dict = {key: getattr(configs, key) for key in config_keys} # Update the W&B config. self.wandb.config.update(config_dict) def _log_ckpt_as_artifact(self, model_path, aliases, metadata=None): """Log model checkpoint as W&B Artifact. Args: model_path (str): Path of the checkpoint to log. aliases (list): List of the aliases associated with this artifact. metadata (dict, optional): Metadata associated with this artifact. 
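Example (a hedged sketch of the raw W&B calls this method wraps; the path and aliases are illustrative only):
    >>> # artifact = self.wandb.Artifact(f'run_{self.wandb.run.id}_model',
    >>> #                                type='model', metadata=metadata)
    >>> # artifact.add_file('work_dir/epoch_12.pth')
    >>> # self.wandb.log_artifact(artifact, aliases=['epoch_12', 'latest'])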
""" model_artifact = self.wandb.Artifact( f'run_{self.wandb.run.id}_model', type='model', metadata=metadata) model_artifact.add_file(model_path) self.wandb.log_artifact(model_artifact, aliases=aliases) def _get_eval_results(self): """Get model evaluation results.""" results = self.eval_hook.latest_results eval_results = self.val_dataset.evaluate( results, logger='silent', **self.eval_hook.eval_kwargs) return eval_results def _init_data_table(self): """Initialize the W&B Tables for validation data.""" columns = ['image_name', 'image'] self.data_table = self.wandb.Table(columns=columns) def _init_pred_table(self): """Initialize the W&B Tables for model evaluation.""" columns = ['image_name', 'ground_truth', 'prediction'] self.eval_table = self.wandb.Table(columns=columns) def _add_ground_truth(self, runner): # Get image loading pipeline from mmdet.datasets.pipelines import LoadImageFromFile img_loader = None for t in self.val_dataset.pipeline.transforms: if isinstance(t, LoadImageFromFile): img_loader = t if img_loader is None: self.log_evaluation = False runner.logger.warning( 'LoadImageFromFile is required to add images ' 'to W&B Tables.') return # Select the images to be logged. self.eval_image_indexs = np.arange(len(self.val_dataset)) # Set seed so that same validation set is logged each time. np.random.seed(42) np.random.shuffle(self.eval_image_indexs) self.eval_image_indexs = self.eval_image_indexs[:self.num_eval_images] CLASSES = self.val_dataset.CLASSES self.class_id_to_label = { id + 1: name for id, name in enumerate(CLASSES) } self.class_set = self.wandb.Classes([{ 'id': id, 'name': name } for id, name in self.class_id_to_label.items()]) img_prefix = self.val_dataset.img_prefix for idx in self.eval_image_indexs: img_info = self.val_dataset.data_infos[idx] image_name = img_info.get('filename', f'img_{idx}') img_height, img_width = img_info['height'], img_info['width'] img_meta = img_loader( dict(img_info=img_info, img_prefix=img_prefix)) # Get image and convert from BGR to RGB image = mmcv.bgr2rgb(img_meta['img']) data_ann = self.val_dataset.get_ann_info(idx) bboxes = data_ann['bboxes'] labels = data_ann['labels'] masks = data_ann.get('masks', None) # Get dict of bounding boxes to be logged. assert len(bboxes) == len(labels) wandb_boxes = self._get_wandb_bboxes(bboxes, labels) # Get dict of masks to be logged. if masks is not None: wandb_masks = self._get_wandb_masks( masks, labels, is_poly_mask=True, height=img_height, width=img_width) else: wandb_masks = None # TODO: Panoramic segmentation visualization. # Log a row to the data table. self.data_table.add_data( image_name, self.wandb.Image( image, boxes=wandb_boxes, masks=wandb_masks, classes=self.class_set)) def _log_predictions(self, results): table_idxs = self.data_table_ref.get_index() assert len(table_idxs) == len(self.eval_image_indexs) for ndx, eval_image_index in enumerate(self.eval_image_indexs): # Get the result result = results[eval_image_index] if isinstance(result, tuple): bbox_result, segm_result = result if isinstance(segm_result, tuple): segm_result = segm_result[0] # ms rcnn else: bbox_result, segm_result = result, None assert len(bbox_result) == len(self.class_id_to_label) # Get labels bboxes = np.vstack(bbox_result) labels = [ np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result) ] labels = np.concatenate(labels) # Get segmentation mask if available. 
segms = None if segm_result is not None and len(labels) > 0: segms = mmcv.concat_list(segm_result) segms = mask_util.decode(segms) segms = segms.transpose(2, 0, 1) assert len(segms) == len(labels) # TODO: Panoramic segmentation visualization. # Remove bounding boxes and masks with score lower than threshold. if self.bbox_score_thr > 0: assert bboxes is not None and bboxes.shape[1] == 5 scores = bboxes[:, -1] inds = scores > self.bbox_score_thr bboxes = bboxes[inds, :] labels = labels[inds] if segms is not None: segms = segms[inds, ...] # Get dict of bounding boxes to be logged. wandb_boxes = self._get_wandb_bboxes(bboxes, labels, log_gt=False) # Get dict of masks to be logged. if segms is not None: wandb_masks = self._get_wandb_masks(segms, labels) else: wandb_masks = None # Log a row to the eval table. self.eval_table.add_data( self.data_table_ref.data[ndx][0], self.data_table_ref.data[ndx][1], self.wandb.Image( self.data_table_ref.data[ndx][1], boxes=wandb_boxes, masks=wandb_masks, classes=self.class_set)) def _get_wandb_bboxes(self, bboxes, labels, log_gt=True): """Get list of structured dict for logging bounding boxes to W&B. Args: bboxes (list): List of bounding box coordinates in (minX, minY, maxX, maxY) format. labels (int): List of label ids. log_gt (bool): Whether to log ground truth or prediction boxes. Returns: Dictionary of bounding boxes to be logged. """ wandb_boxes = {} box_data = [] for bbox, label in zip(bboxes, labels): if not isinstance(label, int): label = int(label) label = label + 1 if len(bbox) == 5: confidence = float(bbox[4]) class_name = self.class_id_to_label[label] box_caption = f'{class_name} {confidence:.2f}' else: box_caption = str(self.class_id_to_label[label]) position = dict( minX=int(bbox[0]), minY=int(bbox[1]), maxX=int(bbox[2]), maxY=int(bbox[3])) box_data.append({ 'position': position, 'class_id': label, 'box_caption': box_caption, 'domain': 'pixel' }) wandb_bbox_dict = { 'box_data': box_data, 'class_labels': self.class_id_to_label } if log_gt: wandb_boxes['ground_truth'] = wandb_bbox_dict else: wandb_boxes['predictions'] = wandb_bbox_dict return wandb_boxes def _get_wandb_masks(self, masks, labels, is_poly_mask=False, height=None, width=None): """Get list of structured dict for logging masks to W&B. Args: masks (list): List of masks. labels (int): List of label ids. is_poly_mask (bool): Whether the mask is polygonal or not. This is true for CocoDataset. height (int): Height of the image. width (int): Width of the image. Returns: Dictionary of masks to be logged. """ mask_label_dict = dict() for mask, label in zip(masks, labels): label = label + 1 # Get bitmap mask from polygon. if is_poly_mask: if height is not None and width is not None: mask = polygon_to_bitmap(mask, height, width) # Create composite masks for each class. if label not in mask_label_dict.keys(): mask_label_dict[label] = mask else: mask_label_dict[label] = np.logical_or(mask_label_dict[label], mask) wandb_masks = dict() for key, value in mask_label_dict.items(): # Create mask for that class. value = value.astype(np.uint8) value[value > 0] = key # Create dict of masks for logging. class_name = self.class_id_to_label[key] wandb_masks[class_name] = { 'mask_data': value, 'class_labels': self.class_id_to_label } return wandb_masks def _log_data_table(self): """Log the W&B Tables for validation data as artifact and calls `use_artifact` on it so that the evaluation table can use the reference of already uploaded images. This allows the data to be uploaded just once. 
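Note: ``use_artifact`` together with ``wait()`` blocks until the artifact is committed, so ``get('val_data')`` returns a reference to the already uploaded table; later rows reuse that reference instead of re-uploading the images, e.g.:
    >>> # self.eval_table.add_data(name, self.data_table_ref.data[ndx][1], ...)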
""" data_artifact = self.wandb.Artifact('val', type='dataset') data_artifact.add(self.data_table, 'val_data') if not self.wandb.run.offline: self.wandb.run.use_artifact(data_artifact) data_artifact.wait() self.data_table_ref = data_artifact.get('val_data') else: self.data_table_ref = self.data_table def _log_eval_table(self, idx): """Log the W&B Tables for model evaluation. The table will be logged multiple times creating new version. Use this to compare models at different intervals interactively. """ pred_artifact = self.wandb.Artifact( f'run_{self.wandb.run.id}_pred', type='evaluation') pred_artifact.add(self.eval_table, 'eval_data') if self.by_epoch: aliases = ['latest', f'epoch_{idx}'] else: aliases = ['latest', f'iter_{idx}'] self.wandb.run.log_artifact(pred_artifact, aliases=aliases) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/yolox_lrupdater_hook.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from mmcv.runner.hooks import HOOKS from mmcv.runner.hooks.lr_updater import (CosineAnnealingLrUpdaterHook, annealing_cos) @HOOKS.register_module() class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook): """YOLOX learning rate scheme. There are two main differences between YOLOXLrUpdaterHook and CosineAnnealingLrUpdaterHook. 1. When the current running epoch is greater than `max_epoch-last_epoch`, a fixed learning rate will be used 2. The exp warmup scheme is different with LrUpdaterHook in MMCV Args: num_last_epochs (int): The number of epochs with a fixed learning rate before the end of the training. """ def __init__(self, num_last_epochs, **kwargs): self.num_last_epochs = num_last_epochs super(YOLOXLrUpdaterHook, self).__init__(**kwargs) def get_warmup_lr(self, cur_iters): def _get_warmup_lr(cur_iters, regular_lr): # exp warmup scheme k = self.warmup_ratio * pow( (cur_iters + 1) / float(self.warmup_iters), 2) warmup_lr = [_lr * k for _lr in regular_lr] return warmup_lr if isinstance(self.base_lr, dict): lr_groups = {} for key, base_lr in self.base_lr.items(): lr_groups[key] = _get_warmup_lr(cur_iters, base_lr) return lr_groups else: return _get_warmup_lr(cur_iters, self.base_lr) def get_lr(self, runner, base_lr): last_iter = len(runner.data_loader) * self.num_last_epochs if self.by_epoch: progress = runner.epoch max_progress = runner.max_epochs else: progress = runner.iter max_progress = runner.max_iters progress += 1 if self.min_lr_ratio is not None: target_lr = base_lr * self.min_lr_ratio else: target_lr = self.min_lr if progress >= max_progress - last_iter: # fixed learning rate return target_lr else: return annealing_cos( base_lr, target_lr, (progress - self.warmup_iters) / (max_progress - self.warmup_iters - last_iter)) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/yolox_mode_switch_hook.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from mmcv.parallel import is_module_wrapper from mmcv.runner.hooks import HOOKS, Hook @HOOKS.register_module() class YOLOXModeSwitchHook(Hook): """Switch the mode of YOLOX during training. This hook turns off the mosaic and mixup data augmentation and switches to use L1 loss in bbox_head. Args: num_last_epochs (int): The number of latter epochs in the end of the training to close the data augmentation and switch to L1 loss. Default: 15. skip_type_keys (list[str], optional): Sequence of type string to be skip pipeline. 
Default: ('Mosaic', 'RandomAffine', 'MixUp') """ def __init__(self, num_last_epochs=15, skip_type_keys=('Mosaic', 'RandomAffine', 'MixUp')): self.num_last_epochs = num_last_epochs self.skip_type_keys = skip_type_keys self._restart_dataloader = False def before_train_epoch(self, runner): """Close mosaic and mixup augmentation and switches to use L1 loss.""" epoch = runner.epoch train_loader = runner.data_loader model = runner.model if is_module_wrapper(model): model = model.module if (epoch + 1) == runner.max_epochs - self.num_last_epochs: runner.logger.info('No mosaic and mixup aug now!') # The dataset pipeline cannot be updated when persistent_workers # is True, so we need to force the dataloader's multi-process # restart. This is a very hacky approach. train_loader.dataset.update_skip_type_keys(self.skip_type_keys) if hasattr(train_loader, 'persistent_workers' ) and train_loader.persistent_workers is True: train_loader._DataLoader__initialized = False train_loader._iterator = None self._restart_dataloader = True runner.logger.info('Add additional L1 loss now!') model.bbox_head.use_l1 = True else: # Once the restart is complete, we need to restore # the initialization flag. if self._restart_dataloader: train_loader._DataLoader__initialized = True ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/mask/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .mask_target import mask_target from .structures import BaseInstanceMasks, BitmapMasks, PolygonMasks from .utils import encode_mask_results, mask2bbox, split_combined_polys __all__ = [ 'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks', 'PolygonMasks', 'encode_mask_results', 'mask2bbox' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/mask/mask_target.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from torch.nn.modules.utils import _pair def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list, cfg): """Compute mask target for positive proposals in multiple images. Args: pos_proposals_list (list[Tensor]): Positive proposals in multiple images. pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each positive proposals. gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of each image. cfg (dict): Config dict that specifies the mask size. Returns: list[Tensor]: Mask target of each image. 
Example: >>> import mmcv >>> import mmdet >>> from mmdet.core.mask import BitmapMasks >>> from mmdet.core.mask.mask_target import * >>> H, W = 17, 18 >>> cfg = mmcv.Config({'mask_size': (13, 14)}) >>> rng = np.random.RandomState(0) >>> # Positive proposals (tl_x, tl_y, br_x, br_y) for each image >>> pos_proposals_list = [ >>> torch.Tensor([ >>> [ 7.2425, 5.5929, 13.9414, 14.9541], >>> [ 7.3241, 3.6170, 16.3850, 15.3102], >>> ]), >>> torch.Tensor([ >>> [ 4.8448, 6.4010, 7.0314, 9.7681], >>> [ 5.9790, 2.6989, 7.4416, 4.8580], >>> [ 0.0000, 0.0000, 0.1398, 9.8232], >>> ]), >>> ] >>> # Corresponding class index for each proposal for each image >>> pos_assigned_gt_inds_list = [ >>> torch.LongTensor([7, 0]), >>> torch.LongTensor([5, 4, 1]), >>> ] >>> # Ground truth mask for each true object for each image >>> gt_masks_list = [ >>> BitmapMasks(rng.rand(8, H, W), height=H, width=W), >>> BitmapMasks(rng.rand(6, H, W), height=H, width=W), >>> ] >>> mask_targets = mask_target( >>> pos_proposals_list, pos_assigned_gt_inds_list, >>> gt_masks_list, cfg) >>> assert mask_targets.shape == (5,) + cfg['mask_size'] """ cfg_list = [cfg for _ in range(len(pos_proposals_list))] mask_targets = map(mask_target_single, pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list, cfg_list) mask_targets = list(mask_targets) if len(mask_targets) > 0: mask_targets = torch.cat(mask_targets) return mask_targets def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg): """Compute mask target for each positive proposal in the image. Args: pos_proposals (Tensor): Positive proposals. pos_assigned_gt_inds (Tensor): Assigned GT inds of positive proposals. gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap or Polygon. cfg (dict): Config dict that indicate the mask size. Returns: Tensor: Mask target of each positive proposals in the image. 
Example: >>> import mmcv >>> import mmdet >>> from mmdet.core.mask import BitmapMasks >>> from mmdet.core.mask.mask_target import * # NOQA >>> H, W = 32, 32 >>> cfg = mmcv.Config({'mask_size': (7, 11)}) >>> rng = np.random.RandomState(0) >>> # Masks for each ground truth box (relative to the image) >>> gt_masks_data = rng.rand(3, H, W) >>> gt_masks = BitmapMasks(gt_masks_data, height=H, width=W) >>> # Predicted positive boxes in one image >>> pos_proposals = torch.FloatTensor([ >>> [ 16.2, 5.5, 19.9, 20.9], >>> [ 17.3, 13.6, 19.3, 19.3], >>> [ 14.8, 16.4, 17.0, 23.7], >>> [ 0.0, 0.0, 16.0, 16.0], >>> [ 4.0, 0.0, 20.0, 16.0], >>> ]) >>> # For each predicted proposal, its assignment to a gt mask >>> pos_assigned_gt_inds = torch.LongTensor([0, 1, 2, 1, 1]) >>> mask_targets = mask_target_single( >>> pos_proposals, pos_assigned_gt_inds, gt_masks, cfg) >>> assert mask_targets.shape == (5,) + cfg['mask_size'] """ device = pos_proposals.device mask_size = _pair(cfg.mask_size) binarize = not cfg.get('soft_mask_target', False) num_pos = pos_proposals.size(0) if num_pos > 0: proposals_np = pos_proposals.cpu().numpy() maxh, maxw = gt_masks.height, gt_masks.width proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw) proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh) pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy() mask_targets = gt_masks.crop_and_resize( proposals_np, mask_size, device=device, inds=pos_assigned_gt_inds, binarize=binarize).to_ndarray() mask_targets = torch.from_numpy(mask_targets).float().to(device) else: mask_targets = pos_proposals.new_zeros((0, ) + mask_size) return mask_targets ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/mask/structures.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod import cv2 import mmcv import numpy as np import pycocotools.mask as maskUtils import torch from mmcv.ops.roi_align import roi_align class BaseInstanceMasks(metaclass=ABCMeta): """Base class for instance masks.""" @abstractmethod def rescale(self, scale, interpolation='nearest'): """Rescale masks as large as possible while keeping the aspect ratio. For details can refer to `mmcv.imrescale`. Args: scale (tuple[int]): The maximum size (h, w) of rescaled mask. interpolation (str): Same as :func:`mmcv.imrescale`. Returns: BaseInstanceMasks: The rescaled masks. """ @abstractmethod def resize(self, out_shape, interpolation='nearest'): """Resize masks to the given out_shape. Args: out_shape: Target (h, w) of resized mask. interpolation (str): See :func:`mmcv.imresize`. Returns: BaseInstanceMasks: The resized masks. """ @abstractmethod def flip(self, flip_direction='horizontal'): """Flip masks alone the given direction. Args: flip_direction (str): Either 'horizontal' or 'vertical'. Returns: BaseInstanceMasks: The flipped masks. """ @abstractmethod def pad(self, out_shape, pad_val): """Pad masks to the given size of (h, w). Args: out_shape (tuple[int]): Target (h, w) of padded mask. pad_val (int): The padded value. Returns: BaseInstanceMasks: The padded masks. """ @abstractmethod def crop(self, bbox): """Crop each mask by the given bbox. Args: bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ). Return: BaseInstanceMasks: The cropped masks. """ @abstractmethod def crop_and_resize(self, bboxes, out_shape, inds, device, interpolation='bilinear', binarize=True): """Crop and resize masks by the given bboxes. 
This function is mainly used in mask targets computation. It firstly align mask to bboxes by assigned_inds, then crop mask by the assigned bbox and resize to the size of (mask_h, mask_w) Args: bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4) out_shape (tuple[int]): Target (h, w) of resized mask inds (ndarray): Indexes to assign masks to each bbox, shape (N,) and values should be between [0, num_masks - 1]. device (str): Device of bboxes interpolation (str): See `mmcv.imresize` binarize (bool): if True fractional values are rounded to 0 or 1 after the resize operation. if False and unsupported an error will be raised. Defaults to True. Return: BaseInstanceMasks: the cropped and resized masks. """ @abstractmethod def expand(self, expanded_h, expanded_w, top, left): """see :class:`Expand`.""" @property @abstractmethod def areas(self): """ndarray: areas of each instance.""" @abstractmethod def to_ndarray(self): """Convert masks to the format of ndarray. Return: ndarray: Converted masks in the format of ndarray. """ @abstractmethod def to_tensor(self, dtype, device): """Convert masks to the format of Tensor. Args: dtype (str): Dtype of converted mask. device (torch.device): Device of converted masks. Returns: Tensor: Converted masks in the format of Tensor. """ @abstractmethod def translate(self, out_shape, offset, direction='horizontal', fill_val=0, interpolation='bilinear'): """Translate the masks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). offset (int | float): The offset for translate. direction (str): The translate direction, either "horizontal" or "vertical". fill_val (int | float): Border value. Default 0. interpolation (str): Same as :func:`mmcv.imtranslate`. Returns: Translated masks. """ def shear(self, out_shape, magnitude, direction='horizontal', border_value=0, interpolation='bilinear'): """Shear the masks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). magnitude (int | float): The magnitude used for shear. direction (str): The shear direction, either "horizontal" or "vertical". border_value (int | tuple[int]): Value used in case of a constant border. Default 0. interpolation (str): Same as in :func:`mmcv.imshear`. Returns: ndarray: Sheared masks. """ @abstractmethod def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): """Rotate the masks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). angle (int | float): Rotation angle in degrees. Positive values mean counter-clockwise rotation. center (tuple[float], optional): Center point (w, h) of the rotation in source image. If not specified, the center of the image will be used. scale (int | float): Isotropic scale factor. fill_val (int | float): Border value. Default 0 for masks. Returns: Rotated masks. """ class BitmapMasks(BaseInstanceMasks): """This class represents masks in the form of bitmaps. Args: masks (ndarray): ndarray of masks in shape (N, H, W), where N is the number of objects. 
height (int): height of masks width (int): width of masks Example: >>> from mmdet.core.mask.structures import * # NOQA >>> num_masks, H, W = 3, 32, 32 >>> rng = np.random.RandomState(0) >>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.int) >>> self = BitmapMasks(masks, height=H, width=W) >>> # demo crop_and_resize >>> num_boxes = 5 >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) >>> out_shape = (14, 14) >>> inds = torch.randint(0, len(self), size=(num_boxes,)) >>> device = 'cpu' >>> interpolation = 'bilinear' >>> new = self.crop_and_resize( ... bboxes, out_shape, inds, device, interpolation) >>> assert len(new) == num_boxes >>> assert new.height, new.width == out_shape """ def __init__(self, masks, height, width): self.height = height self.width = width if len(masks) == 0: self.masks = np.empty((0, self.height, self.width), dtype=np.uint8) else: assert isinstance(masks, (list, np.ndarray)) if isinstance(masks, list): assert isinstance(masks[0], np.ndarray) assert masks[0].ndim == 2 # (H, W) else: assert masks.ndim == 3 # (N, H, W) self.masks = np.stack(masks).reshape(-1, height, width) assert self.masks.shape[1] == self.height assert self.masks.shape[2] == self.width def __getitem__(self, index): """Index the BitmapMask. Args: index (int | ndarray): Indices in the format of integer or ndarray. Returns: :obj:`BitmapMasks`: Indexed bitmap masks. """ masks = self.masks[index].reshape(-1, self.height, self.width) return BitmapMasks(masks, self.height, self.width) def __iter__(self): return iter(self.masks) def __repr__(self): s = self.__class__.__name__ + '(' s += f'num_masks={len(self.masks)}, ' s += f'height={self.height}, ' s += f'width={self.width})' return s def __len__(self): """Number of masks.""" return len(self.masks) def rescale(self, scale, interpolation='nearest'): """See :func:`BaseInstanceMasks.rescale`.""" if len(self.masks) == 0: new_w, new_h = mmcv.rescale_size((self.width, self.height), scale) rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8) else: rescaled_masks = np.stack([ mmcv.imrescale(mask, scale, interpolation=interpolation) for mask in self.masks ]) height, width = rescaled_masks.shape[1:] return BitmapMasks(rescaled_masks, height, width) def resize(self, out_shape, interpolation='nearest'): """See :func:`BaseInstanceMasks.resize`.""" if len(self.masks) == 0: resized_masks = np.empty((0, *out_shape), dtype=np.uint8) else: resized_masks = np.stack([ mmcv.imresize( mask, out_shape[::-1], interpolation=interpolation) for mask in self.masks ]) return BitmapMasks(resized_masks, *out_shape) def flip(self, flip_direction='horizontal'): """See :func:`BaseInstanceMasks.flip`.""" assert flip_direction in ('horizontal', 'vertical', 'diagonal') if len(self.masks) == 0: flipped_masks = self.masks else: flipped_masks = np.stack([ mmcv.imflip(mask, direction=flip_direction) for mask in self.masks ]) return BitmapMasks(flipped_masks, self.height, self.width) def pad(self, out_shape, pad_val=0): """See :func:`BaseInstanceMasks.pad`.""" if len(self.masks) == 0: padded_masks = np.empty((0, *out_shape), dtype=np.uint8) else: padded_masks = np.stack([ mmcv.impad(mask, shape=out_shape, pad_val=pad_val) for mask in self.masks ]) return BitmapMasks(padded_masks, *out_shape) def crop(self, bbox): """See :func:`BaseInstanceMasks.crop`.""" assert isinstance(bbox, np.ndarray) assert bbox.ndim == 1 # clip the boundary bbox = bbox.copy() bbox[0::2] = np.clip(bbox[0::2], 0, self.width) bbox[1::2] = np.clip(bbox[1::2], 0, self.height) x1, y1, x2, y2 = bbox w = np.maximum(x2 - 
x1, 1) h = np.maximum(y2 - y1, 1) if len(self.masks) == 0: cropped_masks = np.empty((0, h, w), dtype=np.uint8) else: cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w] return BitmapMasks(cropped_masks, h, w) def crop_and_resize(self, bboxes, out_shape, inds, device='cpu', interpolation='bilinear', binarize=True): """See :func:`BaseInstanceMasks.crop_and_resize`.""" if len(self.masks) == 0: empty_masks = np.empty((0, *out_shape), dtype=np.uint8) return BitmapMasks(empty_masks, *out_shape) # convert bboxes to tensor if isinstance(bboxes, np.ndarray): bboxes = torch.from_numpy(bboxes).to(device=device) if isinstance(inds, np.ndarray): inds = torch.from_numpy(inds).to(device=device) num_bbox = bboxes.shape[0] fake_inds = torch.arange( num_bbox, device=device).to(dtype=bboxes.dtype)[:, None] rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5 rois = rois.to(device=device) if num_bbox > 0: gt_masks_th = torch.from_numpy(self.masks).to(device).index_select( 0, inds).to(dtype=rois.dtype) targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape, 1.0, 0, 'avg', True).squeeze(1) if binarize: resized_masks = (targets >= 0.5).cpu().numpy() else: resized_masks = targets.cpu().numpy() else: resized_masks = [] return BitmapMasks(resized_masks, *out_shape) def expand(self, expanded_h, expanded_w, top, left): """See :func:`BaseInstanceMasks.expand`.""" if len(self.masks) == 0: expanded_mask = np.empty((0, expanded_h, expanded_w), dtype=np.uint8) else: expanded_mask = np.zeros((len(self), expanded_h, expanded_w), dtype=np.uint8) expanded_mask[:, top:top + self.height, left:left + self.width] = self.masks return BitmapMasks(expanded_mask, expanded_h, expanded_w) def translate(self, out_shape, offset, direction='horizontal', fill_val=0, interpolation='bilinear'): """Translate the BitmapMasks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). offset (int | float): The offset for translate. direction (str): The translate direction, either "horizontal" or "vertical". fill_val (int | float): Border value. Default 0 for masks. interpolation (str): Same as :func:`mmcv.imtranslate`. Returns: BitmapMasks: Translated BitmapMasks. Example: >>> from mmdet.core.mask.structures import BitmapMasks >>> self = BitmapMasks.random(dtype=np.uint8) >>> out_shape = (32, 32) >>> offset = 4 >>> direction = 'horizontal' >>> fill_val = 0 >>> interpolation = 'bilinear' >>> # Note, There seem to be issues when: >>> # * out_shape is different than self's shape >>> # * the mask dtype is not supported by cv2.AffineWarp >>> new = self.translate(out_shape, offset, direction, fill_val, >>> interpolation) >>> assert len(new) == len(self) >>> assert new.height, new.width == out_shape """ if len(self.masks) == 0: translated_masks = np.empty((0, *out_shape), dtype=np.uint8) else: translated_masks = mmcv.imtranslate( self.masks.transpose((1, 2, 0)), offset, direction, border_value=fill_val, interpolation=interpolation) if translated_masks.ndim == 2: translated_masks = translated_masks[:, :, None] translated_masks = translated_masks.transpose( (2, 0, 1)).astype(self.masks.dtype) return BitmapMasks(translated_masks, *out_shape) def shear(self, out_shape, magnitude, direction='horizontal', border_value=0, interpolation='bilinear'): """Shear the BitmapMasks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). magnitude (int | float): The magnitude used for shear. direction (str): The shear direction, either "horizontal" or "vertical". border_value (int | tuple[int]): Value used in case of a constant border. 
interpolation (str): Same as in :func:`mmcv.imshear`. Returns: BitmapMasks: The sheared masks. """ if len(self.masks) == 0: sheared_masks = np.empty((0, *out_shape), dtype=np.uint8) else: sheared_masks = mmcv.imshear( self.masks.transpose((1, 2, 0)), magnitude, direction, border_value=border_value, interpolation=interpolation) if sheared_masks.ndim == 2: sheared_masks = sheared_masks[:, :, None] sheared_masks = sheared_masks.transpose( (2, 0, 1)).astype(self.masks.dtype) return BitmapMasks(sheared_masks, *out_shape) def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): """Rotate the BitmapMasks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). angle (int | float): Rotation angle in degrees. Positive values mean counter-clockwise rotation. center (tuple[float], optional): Center point (w, h) of the rotation in source image. If not specified, the center of the image will be used. scale (int | float): Isotropic scale factor. fill_val (int | float): Border value. Default 0 for masks. Returns: BitmapMasks: Rotated BitmapMasks. """ if len(self.masks) == 0: rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype) else: rotated_masks = mmcv.imrotate( self.masks.transpose((1, 2, 0)), angle, center=center, scale=scale, border_value=fill_val) if rotated_masks.ndim == 2: # case when only one mask, (h, w) rotated_masks = rotated_masks[:, :, None] # (h, w, 1) rotated_masks = rotated_masks.transpose( (2, 0, 1)).astype(self.masks.dtype) return BitmapMasks(rotated_masks, *out_shape) @property def areas(self): """See :py:attr:`BaseInstanceMasks.areas`.""" return self.masks.sum((1, 2)) def to_ndarray(self): """See :func:`BaseInstanceMasks.to_ndarray`.""" return self.masks def to_tensor(self, dtype, device): """See :func:`BaseInstanceMasks.to_tensor`.""" return torch.tensor(self.masks, dtype=dtype, device=device) @classmethod def random(cls, num_masks=3, height=32, width=32, dtype=np.uint8, rng=None): """Generate random bitmap masks for demo / testing purposes. Example: >>> from mmdet.core.mask.structures import BitmapMasks >>> self = BitmapMasks.random() >>> print('self = {}'.format(self)) self = BitmapMasks(num_masks=3, height=32, width=32) """ from mmdet.utils.util_random import ensure_rng rng = ensure_rng(rng) masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype) self = cls(masks, height=height, width=width) return self def get_bboxes(self): num_masks = len(self) boxes = np.zeros((num_masks, 4), dtype=np.float32) x_any = self.masks.any(axis=1) y_any = self.masks.any(axis=2) for idx in range(num_masks): x = np.where(x_any[idx, :])[0] y = np.where(y_any[idx, :])[0] if len(x) > 0 and len(y) > 0: # use +1 for x_max and y_max so that the right and bottom # boundary of instance masks are fully included by the box boxes[idx, :] = np.array([x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=np.float32) return boxes class PolygonMasks(BaseInstanceMasks): """This class represents masks in the form of polygons. Polygons is a list of three levels. 
The first level of the list corresponds to objects, the second level to the polys that compose the object, the third level to the poly coordinates Args: masks (list[list[ndarray]]): The first level of the list corresponds to objects, the second level to the polys that compose the object, the third level to the poly coordinates height (int): height of masks width (int): width of masks Example: >>> from mmdet.core.mask.structures import * # NOQA >>> masks = [ >>> [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ] >>> ] >>> height, width = 16, 16 >>> self = PolygonMasks(masks, height, width) >>> # demo translate >>> new = self.translate((16, 16), 4., direction='horizontal') >>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2]) >>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4) >>> # demo crop_and_resize >>> num_boxes = 3 >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) >>> out_shape = (16, 16) >>> inds = torch.randint(0, len(self), size=(num_boxes,)) >>> device = 'cpu' >>> interpolation = 'bilinear' >>> new = self.crop_and_resize( ... bboxes, out_shape, inds, device, interpolation) >>> assert len(new) == num_boxes >>> assert new.height, new.width == out_shape """ def __init__(self, masks, height, width): assert isinstance(masks, list) if len(masks) > 0: assert isinstance(masks[0], list) assert isinstance(masks[0][0], np.ndarray) self.height = height self.width = width self.masks = masks def __getitem__(self, index): """Index the polygon masks. Args: index (ndarray | List): The indices. Returns: :obj:`PolygonMasks`: The indexed polygon masks. """ if isinstance(index, np.ndarray): index = index.tolist() if isinstance(index, list): masks = [self.masks[i] for i in index] else: try: masks = self.masks[index] except Exception: raise ValueError( f'Unsupported input of type {type(index)} for indexing!') if len(masks) and isinstance(masks[0], np.ndarray): masks = [masks] # ensure a list of three levels return PolygonMasks(masks, self.height, self.width) def __iter__(self): return iter(self.masks) def __repr__(self): s = self.__class__.__name__ + '(' s += f'num_masks={len(self.masks)}, ' s += f'height={self.height}, ' s += f'width={self.width})' return s def __len__(self): """Number of masks.""" return len(self.masks) def rescale(self, scale, interpolation=None): """see :func:`BaseInstanceMasks.rescale`""" new_w, new_h = mmcv.rescale_size((self.width, self.height), scale) if len(self.masks) == 0: rescaled_masks = PolygonMasks([], new_h, new_w) else: rescaled_masks = self.resize((new_h, new_w)) return rescaled_masks def resize(self, out_shape, interpolation=None): """see :func:`BaseInstanceMasks.resize`""" if len(self.masks) == 0: resized_masks = PolygonMasks([], *out_shape) else: h_scale = out_shape[0] / self.height w_scale = out_shape[1] / self.width resized_masks = [] for poly_per_obj in self.masks: resized_poly = [] for p in poly_per_obj: p = p.copy() p[0::2] = p[0::2] * w_scale p[1::2] = p[1::2] * h_scale resized_poly.append(p) resized_masks.append(resized_poly) resized_masks = PolygonMasks(resized_masks, *out_shape) return resized_masks def flip(self, flip_direction='horizontal'): """see :func:`BaseInstanceMasks.flip`""" assert flip_direction in ('horizontal', 'vertical', 'diagonal') if len(self.masks) == 0: flipped_masks = PolygonMasks([], self.height, self.width) else: flipped_masks = [] for poly_per_obj in self.masks: flipped_poly_per_obj = [] for p in poly_per_obj: p = p.copy() if flip_direction == 'horizontal': p[0::2] = self.width - p[0::2] elif flip_direction == 
'vertical':
                        p[1::2] = self.height - p[1::2]
                    else:
                        p[0::2] = self.width - p[0::2]
                        p[1::2] = self.height - p[1::2]
                    flipped_poly_per_obj.append(p)
                flipped_masks.append(flipped_poly_per_obj)
            flipped_masks = PolygonMasks(flipped_masks, self.height,
                                         self.width)
        return flipped_masks

    def crop(self, bbox):
        """see :func:`BaseInstanceMasks.crop`"""
        assert isinstance(bbox, np.ndarray)
        assert bbox.ndim == 1

        # clip the boundary
        bbox = bbox.copy()
        bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
        bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
        x1, y1, x2, y2 = bbox
        w = np.maximum(x2 - x1, 1)
        h = np.maximum(y2 - y1, 1)

        if len(self.masks) == 0:
            cropped_masks = PolygonMasks([], h, w)
        else:
            cropped_masks = []
            for poly_per_obj in self.masks:
                cropped_poly_per_obj = []
                for p in poly_per_obj:
                    # pycocotools will clip the boundary
                    p = p.copy()
                    p[0::2] = p[0::2] - bbox[0]
                    p[1::2] = p[1::2] - bbox[1]
                    cropped_poly_per_obj.append(p)
                cropped_masks.append(cropped_poly_per_obj)
            cropped_masks = PolygonMasks(cropped_masks, h, w)
        return cropped_masks

    def pad(self, out_shape, pad_val=0):
        """Padding has no effect on polygons."""
        return PolygonMasks(self.masks, *out_shape)

    def expand(self, *args, **kwargs):
        """TODO: Add expand for polygon"""
        raise NotImplementedError

    def crop_and_resize(self,
                        bboxes,
                        out_shape,
                        inds,
                        device='cpu',
                        interpolation='bilinear',
                        binarize=True):
        """see :func:`BaseInstanceMasks.crop_and_resize`"""
        out_h, out_w = out_shape
        if len(self.masks) == 0:
            return PolygonMasks([], out_h, out_w)

        if not binarize:
            raise ValueError('Polygons are always binary, '
                             'setting binarize=False is unsupported')

        resized_masks = []
        for i in range(len(bboxes)):
            mask = self.masks[inds[i]]
            bbox = bboxes[i, :]
            x1, y1, x2, y2 = bbox
            w = np.maximum(x2 - x1, 1)
            h = np.maximum(y2 - y1, 1)
            h_scale = out_h / max(h, 0.1)  # avoid too large scale
            w_scale = out_w / max(w, 0.1)

            resized_mask = []
            for p in mask:
                p = p.copy()
                # crop
                # pycocotools will clip the boundary
                p[0::2] = p[0::2] - bbox[0]
                p[1::2] = p[1::2] - bbox[1]
                # resize
                p[0::2] = p[0::2] * w_scale
                p[1::2] = p[1::2] * h_scale
                resized_mask.append(p)
            resized_masks.append(resized_mask)
        return PolygonMasks(resized_masks, *out_shape)

    def translate(self,
                  out_shape,
                  offset,
                  direction='horizontal',
                  fill_val=None,
                  interpolation=None):
        """Translate the PolygonMasks.

        Example:
            >>> self = PolygonMasks.random(dtype=np.int32)
            >>> out_shape = (self.height, self.width)
            >>> new = self.translate(out_shape, 4., direction='horizontal')
            >>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2])
            >>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4)  # noqa: E501
        """
        assert fill_val is None or fill_val == 0, \
            'Here fill_val is not used; it should default to None or 0, ' \
            f'but got {fill_val}.'
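        # A minimal sketch of the shift-and-clip step below (values are
        # assumed, doctest style): for direction='horizontal' with offset=4
        # and out_shape=(16, 16),
        #   >>> p = np.array([0., 0., 10., 0., 10., 10.])
        #   >>> np.clip(p[0::2] + 4., 0, 16)
        #   array([ 4., 14., 14.])
        # only the x coordinates (even indices) move; y stays untouched.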
if len(self.masks) == 0: translated_masks = PolygonMasks([], *out_shape) else: translated_masks = [] for poly_per_obj in self.masks: translated_poly_per_obj = [] for p in poly_per_obj: p = p.copy() if direction == 'horizontal': p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1]) elif direction == 'vertical': p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0]) translated_poly_per_obj.append(p) translated_masks.append(translated_poly_per_obj) translated_masks = PolygonMasks(translated_masks, *out_shape) return translated_masks def shear(self, out_shape, magnitude, direction='horizontal', border_value=0, interpolation='bilinear'): """See :func:`BaseInstanceMasks.shear`.""" if len(self.masks) == 0: sheared_masks = PolygonMasks([], *out_shape) else: sheared_masks = [] if direction == 'horizontal': shear_matrix = np.stack([[1, magnitude], [0, 1]]).astype(np.float32) elif direction == 'vertical': shear_matrix = np.stack([[1, 0], [magnitude, 1]]).astype(np.float32) for poly_per_obj in self.masks: sheared_poly = [] for p in poly_per_obj: p = np.stack([p[0::2], p[1::2]], axis=0) # [2, n] new_coords = np.matmul(shear_matrix, p) # [2, n] new_coords[0, :] = np.clip(new_coords[0, :], 0, out_shape[1]) new_coords[1, :] = np.clip(new_coords[1, :], 0, out_shape[0]) sheared_poly.append( new_coords.transpose((1, 0)).reshape(-1)) sheared_masks.append(sheared_poly) sheared_masks = PolygonMasks(sheared_masks, *out_shape) return sheared_masks def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): """See :func:`BaseInstanceMasks.rotate`.""" if len(self.masks) == 0: rotated_masks = PolygonMasks([], *out_shape) else: rotated_masks = [] rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale) for poly_per_obj in self.masks: rotated_poly = [] for p in poly_per_obj: p = p.copy() coords = np.stack([p[0::2], p[1::2]], axis=1) # [n, 2] # pad 1 to convert from format [x, y] to homogeneous # coordinates format [x, y, 1] coords = np.concatenate( (coords, np.ones((coords.shape[0], 1), coords.dtype)), axis=1) # [n, 3] rotated_coords = np.matmul( rotate_matrix[None, :, :], coords[:, :, None])[..., 0] # [n, 2, 1] -> [n, 2] rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0, out_shape[1]) rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0, out_shape[0]) rotated_poly.append(rotated_coords.reshape(-1)) rotated_masks.append(rotated_poly) rotated_masks = PolygonMasks(rotated_masks, *out_shape) return rotated_masks def to_bitmap(self): """convert polygon masks to bitmap masks.""" bitmap_masks = self.to_ndarray() return BitmapMasks(bitmap_masks, self.height, self.width) @property def areas(self): """Compute areas of masks. This func is modified from `detectron2 `_. The function only works with Polygons using the shoelace formula. Return: ndarray: areas of each instance """ # noqa: W501 area = [] for polygons_per_obj in self.masks: area_per_obj = 0 for p in polygons_per_obj: area_per_obj += self._polygon_area(p[0::2], p[1::2]) area.append(area_per_obj) return np.asarray(area) def _polygon_area(self, x, y): """Compute the area of a component of a polygon. 
        Using the shoelace formula:
        https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates

        Args:
            x (ndarray): x coordinates of the component
            y (ndarray): y coordinates of the component

        Return:
            float: the area of the component
        """  # noqa: E501
        return 0.5 * np.abs(
            np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

    def to_ndarray(self):
        """Convert masks to the format of ndarray."""
        if len(self.masks) == 0:
            return np.empty((0, self.height, self.width), dtype=np.uint8)
        bitmap_masks = []
        for poly_per_obj in self.masks:
            bitmap_masks.append(
                polygon_to_bitmap(poly_per_obj, self.height, self.width))
        return np.stack(bitmap_masks)

    def to_tensor(self, dtype, device):
        """See :func:`BaseInstanceMasks.to_tensor`."""
        if len(self.masks) == 0:
            return torch.empty((0, self.height, self.width),
                               dtype=dtype,
                               device=device)
        ndarray_masks = self.to_ndarray()
        return torch.tensor(ndarray_masks, dtype=dtype, device=device)

    @classmethod
    def random(cls,
               num_masks=3,
               height=32,
               width=32,
               n_verts=5,
               dtype=np.float32,
               rng=None):
        """Generate random polygon masks for demo / testing purposes.

        Adapted from [1]_

        References:
            .. [1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379  # noqa: E501

        Example:
            >>> from mmdet.core.mask.structures import PolygonMasks
            >>> self = PolygonMasks.random()
            >>> print('self = {}'.format(self))
        """
        from mmdet.utils.util_random import ensure_rng
        rng = ensure_rng(rng)

        def _gen_polygon(n, irregularity, spikeyness):
            """Creates the polygon by sampling points on a circle around the
            centre. Random noise is added by varying the angular spacing
            between sequential points, and by varying the radial distance of
            each point from the centre.

            Based on original code by Mike Ounsworth

            Args:
                n (int): number of vertices
                irregularity (float): [0,1] indicating how much variance
                    there is in the angular spacing of vertices. [0,1] will
                    map to [0, 2pi/numberOfVerts]
                spikeyness (float): [0,1] indicating how much variance there
                    is in each vertex from the circle of radius aveRadius.
                    [0,1] will map to [0, aveRadius]

            Returns:
                a list of vertices, in CCW order.
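            Note:
                With irregularity=0 the angular steps are exactly tau/n, and
                with spikeyness near 0 the sampled radii stay close to the
                average radius, so the output approaches a regular n-gon;
                increasing either parameter adds angular or radial jitter.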
""" from scipy.stats import truncnorm # Generate around the unit circle cx, cy = (0.0, 0.0) radius = 1 tau = np.pi * 2 irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n spikeyness = np.clip(spikeyness, 1e-9, 1) # generate n angle steps lower = (tau / n) - irregularity upper = (tau / n) + irregularity angle_steps = rng.uniform(lower, upper, n) # normalize the steps so that point 0 and point n+1 are the same k = angle_steps.sum() / (2 * np.pi) angles = (angle_steps / k).cumsum() + rng.uniform(0, tau) # Convert high and low values to be wrt the standard normal range # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html low = 0 high = 2 * radius mean = radius std = spikeyness a = (low - mean) / std b = (high - mean) / std tnorm = truncnorm(a=a, b=b, loc=mean, scale=std) # now generate the points radii = tnorm.rvs(n, random_state=rng) x_pts = cx + radii * np.cos(angles) y_pts = cy + radii * np.sin(angles) points = np.hstack([x_pts[:, None], y_pts[:, None]]) # Scale to 0-1 space points = points - points.min(axis=0) points = points / points.max(axis=0) # Randomly place within 0-1 space points = points * (rng.rand() * .8 + .2) min_pt = points.min(axis=0) max_pt = points.max(axis=0) high = (1 - max_pt) low = (0 - min_pt) offset = (rng.rand(2) * (high - low)) + low points = points + offset return points def _order_vertices(verts): """ References: https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise """ mlat = verts.T[0].sum() / len(verts) mlng = verts.T[1].sum() / len(verts) tau = np.pi * 2 angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) + tau) % tau sortx = angle.argsort() verts = verts.take(sortx, axis=0) return verts # Generate a random exterior for each requested mask masks = [] for _ in range(num_masks): exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9)) exterior = (exterior * [(width, height)]).astype(dtype) masks.append([exterior.ravel()]) self = cls(masks, height, width) return self def get_bboxes(self): num_masks = len(self) boxes = np.zeros((num_masks, 4), dtype=np.float32) for idx, poly_per_obj in enumerate(self.masks): # simply use a number that is big enough for comparison with # coordinates xy_min = np.array([self.width * 2, self.height * 2], dtype=np.float32) xy_max = np.zeros(2, dtype=np.float32) for p in poly_per_obj: xy = np.array(p).reshape(-1, 2).astype(np.float32) xy_min = np.minimum(xy_min, np.min(xy, axis=0)) xy_max = np.maximum(xy_max, np.max(xy, axis=0)) boxes[idx, :2] = xy_min boxes[idx, 2:] = xy_max return boxes def polygon_to_bitmap(polygons, height, width): """Convert masks from the form of polygons to bitmaps. Args: polygons (list[ndarray]): masks in polygon representation height (int): mask height width (int): mask width Return: ndarray: the converted masks in bitmap representation """ rles = maskUtils.frPyObjects(polygons, height, width) rle = maskUtils.merge(rles) bitmap_mask = maskUtils.decode(rle).astype(np.bool) return bitmap_mask def bitmap_to_polygon(bitmap): """Convert masks from the form of bitmaps to polygons. Args: bitmap (ndarray): masks in bitmap representation. Return: list[ndarray]: the converted mask in polygon representation. bool: whether the mask has holes. """ bitmap = np.ascontiguousarray(bitmap).astype(np.uint8) # cv2.RETR_CCOMP: retrieves all of the contours and organizes them # into a two-level hierarchy. At the top level, there are external # boundaries of the components. At the second level, there are # boundaries of the holes. 
If there is another contour inside a hole # of a connected component, it is still put at the top level. # cv2.CHAIN_APPROX_NONE: stores absolutely all the contour points. outs = cv2.findContours(bitmap, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) contours = outs[-2] hierarchy = outs[-1] if hierarchy is None: return [], False # hierarchy[i]: 4 elements, for the indexes of next, previous, # parent, or nested contours. If there is no corresponding contour, # it will be -1. with_hole = (hierarchy.reshape(-1, 4)[:, 3] >= 0).any() contours = [c.reshape(-1, 2) for c in contours] return contours, with_hole ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/mask/utils.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import mmcv import numpy as np import pycocotools.mask as mask_util import torch def split_combined_polys(polys, poly_lens, polys_per_mask): """Split the combined 1-D polys into masks. A mask is represented as a list of polys, and a poly is represented as a 1-D array. In dataset, all masks are concatenated into a single 1-D tensor. Here we need to split the tensor into original representations. Args: polys (list): a list (length = image num) of 1-D tensors poly_lens (list): a list (length = image num) of poly length polys_per_mask (list): a list (length = image num) of poly number of each mask Returns: list: a list (length = image num) of list (length = mask num) of \ list (length = poly num) of numpy array. """ mask_polys_list = [] for img_id in range(len(polys)): polys_single = polys[img_id] polys_lens_single = poly_lens[img_id].tolist() polys_per_mask_single = polys_per_mask[img_id].tolist() split_polys = mmcv.slice_list(polys_single, polys_lens_single) mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single) mask_polys_list.append(mask_polys) return mask_polys_list # TODO: move this function to more proper place def encode_mask_results(mask_results): """Encode bitmap mask to RLE code. Args: mask_results (list | tuple[list]): bitmap mask results. In mask scoring rcnn, mask_results is a tuple of (segm_results, segm_cls_score). Returns: list | tuple: RLE encoded mask. """ if isinstance(mask_results, tuple): # mask scoring cls_segms, cls_mask_scores = mask_results else: cls_segms = mask_results num_classes = len(cls_segms) encoded_mask_results = [[] for _ in range(num_classes)] for i in range(len(cls_segms)): for cls_segm in cls_segms[i]: encoded_mask_results[i].append( mask_util.encode( np.array( cls_segm[:, :, np.newaxis], order='F', dtype='uint8'))[0]) # encoded with RLE if isinstance(mask_results, tuple): return encoded_mask_results, cls_mask_scores else: return encoded_mask_results def mask2bbox(masks): """Obtain tight bounding boxes of binary masks. Args: masks (Tensor): Binary mask of shape (n, h, w). Returns: Tensor: Bboxe with shape (n, 4) of \ positive region in binary mask. """ N = masks.shape[0] bboxes = masks.new_zeros((N, 4), dtype=torch.float32) x_any = torch.any(masks, dim=1) y_any = torch.any(masks, dim=2) for i in range(N): x = torch.where(x_any[i, :])[0] y = torch.where(y_any[i, :])[0] if len(x) > 0 and len(y) > 0: bboxes[i, :] = bboxes.new_tensor( [x[0], y[0], x[-1] + 1, y[-1] + 1]) return bboxes ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/optimizers/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
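# Example usage (a minimal sketch; the cfg layout mirrors build_optimizer in
# .builder below, and the toy model and hyperparameters are illustrative
# assumptions):
#   >>> import torch.nn as nn
#   >>> model = nn.Linear(4, 2)
#   >>> cfg = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=1e-4)
#   >>> optimizer = build_optimizer(model, cfg)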
from .builder import OPTIMIZER_BUILDERS, build_optimizer from .layer_decay_optimizer_constructor import \ LearningRateDecayOptimizerConstructor __all__ = [ 'LearningRateDecayOptimizerConstructor', 'OPTIMIZER_BUILDERS', 'build_optimizer' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/optimizers/builder.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import copy from mmcv.runner.optimizer import OPTIMIZER_BUILDERS as MMCV_OPTIMIZER_BUILDERS from mmcv.utils import Registry, build_from_cfg OPTIMIZER_BUILDERS = Registry( 'optimizer builder', parent=MMCV_OPTIMIZER_BUILDERS) def build_optimizer_constructor(cfg): constructor_type = cfg.get('type') if constructor_type in OPTIMIZER_BUILDERS: return build_from_cfg(cfg, OPTIMIZER_BUILDERS) elif constructor_type in MMCV_OPTIMIZER_BUILDERS: return build_from_cfg(cfg, MMCV_OPTIMIZER_BUILDERS) else: raise KeyError(f'{constructor_type} is not registered ' 'in the optimizer builder registry.') def build_optimizer(model, cfg): optimizer_cfg = copy.deepcopy(cfg) constructor_type = optimizer_cfg.pop('constructor', 'DefaultOptimizerConstructor') paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None) optim_constructor = build_optimizer_constructor( dict( type=constructor_type, optimizer_cfg=optimizer_cfg, paramwise_cfg=paramwise_cfg)) optimizer = optim_constructor(model) return optimizer ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/optimizers/layer_decay_optimizer_constructor.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import json from mmcv.runner import DefaultOptimizerConstructor, get_dist_info from mmdet.utils import get_root_logger from .builder import OPTIMIZER_BUILDERS def get_layer_id_for_convnext(var_name, max_layer_id): """Get the layer id to set the different learning rates in ``layer_wise`` decay_type. Args: var_name (str): The key of the model. max_layer_id (int): Maximum layer id. Returns: int: The id number corresponding to different learning rate in ``LearningRateDecayOptimizerConstructor``. """ if var_name in ('backbone.cls_token', 'backbone.mask_token', 'backbone.pos_embed'): return 0 elif var_name.startswith('backbone.downsample_layers'): stage_id = int(var_name.split('.')[2]) if stage_id == 0: layer_id = 0 elif stage_id == 1: layer_id = 2 elif stage_id == 2: layer_id = 3 elif stage_id == 3: layer_id = max_layer_id return layer_id elif var_name.startswith('backbone.stages'): stage_id = int(var_name.split('.')[2]) block_id = int(var_name.split('.')[3]) if stage_id == 0: layer_id = 1 elif stage_id == 1: layer_id = 2 elif stage_id == 2: layer_id = 3 + block_id // 3 elif stage_id == 3: layer_id = max_layer_id return layer_id else: return max_layer_id + 1 def get_stage_id_for_convnext(var_name, max_stage_id): """Get the stage id to set the different learning rates in ``stage_wise`` decay_type. Args: var_name (str): The key of the model. max_stage_id (int): Maximum stage id. Returns: int: The id number corresponding to different learning rate in ``LearningRateDecayOptimizerConstructor``. 
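    Example:
        A sketch with an assumed ConvNeXt parameter name (following the
        prefix rules below):
        >>> get_stage_id_for_convnext('backbone.stages.2.0.dwconv.weight', 6)
        3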
""" if var_name in ('backbone.cls_token', 'backbone.mask_token', 'backbone.pos_embed'): return 0 elif var_name.startswith('backbone.downsample_layers'): return 0 elif var_name.startswith('backbone.stages'): stage_id = int(var_name.split('.')[2]) return stage_id + 1 else: return max_stage_id - 1 @OPTIMIZER_BUILDERS.register_module() class LearningRateDecayOptimizerConstructor(DefaultOptimizerConstructor): # Different learning rates are set for different layers of backbone. # Note: Currently, this optimizer constructor is built for ConvNeXt. def add_params(self, params, module, **kwargs): """Add all parameters of module to the params list. The parameters of the given module will be added to the list of param groups, with specific rules defined by paramwise_cfg. Args: params (list[dict]): A list of param groups, it will be modified in place. module (nn.Module): The module to be added. """ logger = get_root_logger() parameter_groups = {} logger.info(f'self.paramwise_cfg is {self.paramwise_cfg}') num_layers = self.paramwise_cfg.get('num_layers') + 2 decay_rate = self.paramwise_cfg.get('decay_rate') decay_type = self.paramwise_cfg.get('decay_type', 'layer_wise') logger.info('Build LearningRateDecayOptimizerConstructor ' f'{decay_type} {decay_rate} - {num_layers}') weight_decay = self.base_wd for name, param in module.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith('.bias') or name in ( 'pos_embed', 'cls_token'): group_name = 'no_decay' this_weight_decay = 0. else: group_name = 'decay' this_weight_decay = weight_decay if 'layer_wise' in decay_type: if 'ConvNeXt' in module.backbone.__class__.__name__: layer_id = get_layer_id_for_convnext( name, self.paramwise_cfg.get('num_layers')) logger.info(f'set param {name} as id {layer_id}') else: raise NotImplementedError() elif decay_type == 'stage_wise': if 'ConvNeXt' in module.backbone.__class__.__name__: layer_id = get_stage_id_for_convnext(name, num_layers) logger.info(f'set param {name} as id {layer_id}') else: raise NotImplementedError() group_name = f'layer_{layer_id}_{group_name}' if group_name not in parameter_groups: scale = decay_rate**(num_layers - layer_id - 1) parameter_groups[group_name] = { 'weight_decay': this_weight_decay, 'params': [], 'param_names': [], 'lr_scale': scale, 'group_name': group_name, 'lr': scale * self.base_lr, } parameter_groups[group_name]['params'].append(param) parameter_groups[group_name]['param_names'].append(name) rank, _ = get_dist_info() if rank == 0: to_display = {} for key in parameter_groups: to_display[key] = { 'param_names': parameter_groups[key]['param_names'], 'lr_scale': parameter_groups[key]['lr_scale'], 'lr': parameter_groups[key]['lr'], 'weight_decay': parameter_groups[key]['weight_decay'], } logger.info(f'Param groups = {json.dumps(to_display, indent=2)}') params.extend(parameter_groups.values()) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/post_processing/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
from .bbox_nms import fast_nms, multiclass_nms from .matrix_nms import mask_matrix_nms from .merge_augs import (merge_aug_bboxes, merge_aug_masks, merge_aug_proposals, merge_aug_scores) __all__ = [ 'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes', 'merge_aug_scores', 'merge_aug_masks', 'mask_matrix_nms', 'fast_nms' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/post_processing/bbox_nms.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.ops.nms import batched_nms from mmdet.core.bbox.iou_calculators import bbox_overlaps def multiclass_nms(multi_bboxes, multi_scores, score_thr, nms_cfg, max_num=-1, score_factors=None, return_inds=False): """NMS for multi-class bboxes. Args: multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) multi_scores (Tensor): shape (n, #class), where the last column contains scores of the background class, but this will be ignored. score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. nms_cfg (dict): a dict that contains the arguments of nms operations max_num (int, optional): if there are more than max_num bboxes after NMS, only top max_num will be kept. Default to -1. score_factors (Tensor, optional): The factors multiplied to scores before applying NMS. Default to None. return_inds (bool, optional): Whether return the indices of kept bboxes. Default to False. Returns: tuple: (dets, labels, indices (optional)), tensors of shape (k, 5), (k), and (k). Dets are boxes with scores. Labels are 0-based. """ num_classes = multi_scores.size(1) - 1 # exclude background category if multi_bboxes.shape[1] > 4: bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4) else: bboxes = multi_bboxes[:, None].expand( multi_scores.size(0), num_classes, 4) scores = multi_scores[:, :-1] labels = torch.arange(num_classes, dtype=torch.long, device=scores.device) labels = labels.view(1, -1).expand_as(scores) bboxes = bboxes.reshape(-1, 4) scores = scores.reshape(-1) labels = labels.reshape(-1) if not torch.onnx.is_in_onnx_export(): # NonZero not supported in TensorRT # remove low scoring boxes valid_mask = scores > score_thr # multiply score_factor after threshold to preserve more bboxes, improve # mAP by 1% for YOLOv3 if score_factors is not None: # expand the shape to match original shape of score score_factors = score_factors.view(-1, 1).expand( multi_scores.size(0), num_classes) score_factors = score_factors.reshape(-1) scores = scores * score_factors if not torch.onnx.is_in_onnx_export(): # NonZero not supported in TensorRT inds = valid_mask.nonzero(as_tuple=False).squeeze(1) bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds] else: # TensorRT NMS plugin has invalid output filled with -1 # add dummy data to make detection output correct. 
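            # e.g. a single all-zero row [0, 0, 0, 0] with score 0 and
            # label 0 is appended below, so the tensors are never empty and
            # the exported graph keeps a static output signature.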
bboxes = torch.cat([bboxes, bboxes.new_zeros(1, 4)], dim=0) scores = torch.cat([scores, scores.new_zeros(1)], dim=0) labels = torch.cat([labels, labels.new_zeros(1)], dim=0) if bboxes.numel() == 0: if torch.onnx.is_in_onnx_export(): raise RuntimeError('[ONNX Error] Can not record NMS ' 'as it has not been executed this time') dets = torch.cat([bboxes, scores[:, None]], -1) if return_inds: return dets, labels, inds else: return dets, labels dets, keep = batched_nms(bboxes, scores, labels, nms_cfg) if max_num > 0: dets = dets[:max_num] keep = keep[:max_num] if return_inds: return dets, labels[keep], inds[keep] else: return dets, labels[keep] def fast_nms(multi_bboxes, multi_scores, multi_coeffs, score_thr, iou_thr, top_k, max_num=-1): """Fast NMS in `YOLACT `_. Fast NMS allows already-removed detections to suppress other detections so that every instance can be decided to be kept or discarded in parallel, which is not possible in traditional NMS. This relaxation allows us to implement Fast NMS entirely in standard GPU-accelerated matrix operations. Args: multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) multi_scores (Tensor): shape (n, #class+1), where the last column contains scores of the background class, but this will be ignored. multi_coeffs (Tensor): shape (n, #class*coeffs_dim). score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. iou_thr (float): IoU threshold to be considered as conflicted. top_k (int): if there are more than top_k bboxes before NMS, only top top_k will be kept. max_num (int): if there are more than max_num bboxes after NMS, only top max_num will be kept. If -1, keep all the bboxes. Default: -1. Returns: tuple: (dets, labels, coefficients), tensors of shape (k, 5), (k, 1), and (k, coeffs_dim). Dets are boxes with scores. Labels are 0-based. """ scores = multi_scores[:, :-1].t() # [#class, n] scores, idx = scores.sort(1, descending=True) idx = idx[:, :top_k].contiguous() scores = scores[:, :top_k] # [#class, topk] num_classes, num_dets = idx.size() boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4) coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1) iou = bbox_overlaps(boxes, boxes) # [#class, topk, topk] iou.triu_(diagonal=1) iou_max, _ = iou.max(dim=1) # Now just filter out the ones higher than the threshold keep = iou_max <= iou_thr # Second thresholding introduces 0.2 mAP gain at negligible time cost keep *= scores > score_thr # Assign each kept detection to its corresponding class classes = torch.arange( num_classes, device=boxes.device)[:, None].expand_as(keep) classes = classes[keep] boxes = boxes[keep] coeffs = coeffs[keep] scores = scores[keep] # Only keep the top max_num highest scores across all classes scores, idx = scores.sort(0, descending=True) if max_num > 0: idx = idx[:max_num] scores = scores[:max_num] classes = classes[idx] boxes = boxes[idx] coeffs = coeffs[idx] cls_dets = torch.cat([boxes, scores[:, None]], dim=1) return cls_dets, classes, coeffs ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/post_processing/matrix_nms.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch def mask_matrix_nms(masks, labels, scores, filter_thr=-1, nms_pre=-1, max_num=-1, kernel='gaussian', sigma=2.0, mask_area=None): """Matrix NMS for multi-class masks. Args: masks (Tensor): Has shape (num_instances, h, w) labels (Tensor): Labels of corresponding masks, has shape (num_instances,). 
scores (Tensor): Mask scores of corresponding masks, has shape (num_instances). filter_thr (float): Score threshold to filter the masks after matrix nms. Default: -1, which means do not use filter_thr. nms_pre (int): The max number of instances to do the matrix nms. Default: -1, which means do not use nms_pre. max_num (int, optional): If there are more than max_num masks after matrix, only top max_num will be kept. Default: -1, which means do not use max_num. kernel (str): 'linear' or 'gaussian'. sigma (float): std in gaussian method. mask_area (Tensor): The sum of seg_masks. Returns: tuple(Tensor): Processed mask results. - scores (Tensor): Updated scores, has shape (n,). - labels (Tensor): Remained labels, has shape (n,). - masks (Tensor): Remained masks, has shape (n, w, h). - keep_inds (Tensor): The indices number of the remaining mask in the input mask, has shape (n,). """ assert len(labels) == len(masks) == len(scores) if len(labels) == 0: return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros( 0, *masks.shape[-2:]), labels.new_zeros(0) if mask_area is None: mask_area = masks.sum((1, 2)).float() else: assert len(masks) == len(mask_area) # sort and keep top nms_pre scores, sort_inds = torch.sort(scores, descending=True) keep_inds = sort_inds if nms_pre > 0 and len(sort_inds) > nms_pre: sort_inds = sort_inds[:nms_pre] keep_inds = keep_inds[:nms_pre] scores = scores[:nms_pre] masks = masks[sort_inds] mask_area = mask_area[sort_inds] labels = labels[sort_inds] num_masks = len(labels) flatten_masks = masks.reshape(num_masks, -1).float() # inter. inter_matrix = torch.mm(flatten_masks, flatten_masks.transpose(1, 0)) expanded_mask_area = mask_area.expand(num_masks, num_masks) # Upper triangle iou matrix. iou_matrix = (inter_matrix / (expanded_mask_area + expanded_mask_area.transpose(1, 0) - inter_matrix)).triu(diagonal=1) # label_specific matrix. expanded_labels = labels.expand(num_masks, num_masks) # Upper triangle label matrix. label_matrix = (expanded_labels == expanded_labels.transpose( 1, 0)).triu(diagonal=1) # IoU compensation compensate_iou, _ = (iou_matrix * label_matrix).max(0) compensate_iou = compensate_iou.expand(num_masks, num_masks).transpose(1, 0) # IoU decay decay_iou = iou_matrix * label_matrix # Calculate the decay_coefficient if kernel == 'gaussian': decay_matrix = torch.exp(-1 * sigma * (decay_iou**2)) compensate_matrix = torch.exp(-1 * sigma * (compensate_iou**2)) decay_coefficient, _ = (decay_matrix / compensate_matrix).min(0) elif kernel == 'linear': decay_matrix = (1 - decay_iou) / (1 - compensate_iou) decay_coefficient, _ = decay_matrix.min(0) else: raise NotImplementedError( f'{kernel} kernel is not supported in matrix nms!') # update the score. 
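    # A numeric sketch with assumed values: under the default gaussian
    # kernel (sigma=2.0), a mask whose best same-class overlap is IoU 0.7
    # and whose compensation IoU is 0 is rescaled by
    # exp(-2.0 * 0.7 ** 2) ~= 0.375.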
scores = scores * decay_coefficient if filter_thr > 0: keep = scores >= filter_thr keep_inds = keep_inds[keep] if not keep.any(): return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros( 0, *masks.shape[-2:]), labels.new_zeros(0) masks = masks[keep] scores = scores[keep] labels = labels[keep] # sort and keep top max_num scores, sort_inds = torch.sort(scores, descending=True) keep_inds = keep_inds[sort_inds] if max_num > 0 and len(sort_inds) > max_num: sort_inds = sort_inds[:max_num] keep_inds = keep_inds[:max_num] scores = scores[:max_num] masks = masks[sort_inds] labels = labels[sort_inds] return scores, labels, masks, keep_inds ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/post_processing/merge_augs.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import copy import warnings import numpy as np import torch from mmcv import ConfigDict from mmcv.ops import nms from ..bbox import bbox_mapping_back def merge_aug_proposals(aug_proposals, img_metas, cfg): """Merge augmented proposals (multiscale, flip, etc.) Args: aug_proposals (list[Tensor]): proposals from different testing schemes, shape (n, 5). Note that they are not rescaled to the original image size. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. cfg (dict): rpn test config. Returns: Tensor: shape (n, 4), proposals corresponding to original image scale. """ cfg = copy.deepcopy(cfg) # deprecate arguments warning if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: warnings.warn( 'In rpn_proposal or test_cfg, ' 'nms_thr has been moved to a dict named nms as ' 'iou_threshold, max_num has been renamed as max_per_img, ' 'name of original arguments and the way to specify ' 'iou_threshold of NMS will be deprecated.') if 'nms' not in cfg: cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) if 'max_num' in cfg: if 'max_per_img' in cfg: assert cfg.max_num == cfg.max_per_img, f'You set max_num and ' \ f'max_per_img at the same time, but get {cfg.max_num} ' \ f'and {cfg.max_per_img} respectively' \ f'Please delete max_num which will be deprecated.' else: cfg.max_per_img = cfg.max_num if 'nms_thr' in cfg: assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \ f'iou_threshold in nms and ' \ f'nms_thr at the same time, but get ' \ f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \ f' respectively. Please delete the nms_thr ' \ f'which will be deprecated.' 
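    # Map each augmented proposal set back to the original image frame
    # (undoing scaling and flipping) so a single NMS can merge them jointly.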
recovered_proposals = [] for proposals, img_info in zip(aug_proposals, img_metas): img_shape = img_info['img_shape'] scale_factor = img_info['scale_factor'] flip = img_info['flip'] flip_direction = img_info['flip_direction'] _proposals = proposals.clone() _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape, scale_factor, flip, flip_direction) recovered_proposals.append(_proposals) aug_proposals = torch.cat(recovered_proposals, dim=0) merged_proposals, _ = nms(aug_proposals[:, :4].contiguous(), aug_proposals[:, -1].contiguous(), cfg.nms.iou_threshold) scores = merged_proposals[:, 4] _, order = scores.sort(0, descending=True) num = min(cfg.max_per_img, merged_proposals.shape[0]) order = order[:num] merged_proposals = merged_proposals[order, :] return merged_proposals def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg): """Merge augmented detection bboxes and scores. Args: aug_bboxes (list[Tensor]): shape (n, 4*#class) aug_scores (list[Tensor] or None): shape (n, #class) img_shapes (list[Tensor]): shape (3, ). rcnn_test_cfg (dict): rcnn test config. Returns: tuple: (bboxes, scores) """ recovered_bboxes = [] for bboxes, img_info in zip(aug_bboxes, img_metas): img_shape = img_info[0]['img_shape'] scale_factor = img_info[0]['scale_factor'] flip = img_info[0]['flip'] flip_direction = img_info[0]['flip_direction'] bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, flip_direction) recovered_bboxes.append(bboxes) bboxes = torch.stack(recovered_bboxes).mean(dim=0) if aug_scores is None: return bboxes else: scores = torch.stack(aug_scores).mean(dim=0) return bboxes, scores def merge_aug_scores(aug_scores): """Merge augmented bbox scores.""" if isinstance(aug_scores[0], torch.Tensor): return torch.mean(torch.stack(aug_scores), dim=0) else: return np.mean(aug_scores, axis=0) def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None): """Merge augmented mask prediction. Args: aug_masks (list[ndarray]): shape (n, #class, h, w) img_shapes (list[ndarray]): shape (3, ). rcnn_test_cfg (dict): rcnn test config. Returns: tuple: (bboxes, scores) """ recovered_masks = [] for mask, img_info in zip(aug_masks, img_metas): flip = img_info[0]['flip'] if flip: flip_direction = img_info[0]['flip_direction'] if flip_direction == 'horizontal': mask = mask[:, :, :, ::-1] elif flip_direction == 'vertical': mask = mask[:, :, ::-1, :] elif flip_direction == 'diagonal': mask = mask[:, :, :, ::-1] mask = mask[:, :, ::-1, :] else: raise ValueError( f"Invalid flipping direction '{flip_direction}'") recovered_masks.append(mask) if weights is None: merged_masks = np.mean(recovered_masks, axis=0) else: merged_masks = np.average( np.array(recovered_masks), axis=0, weights=np.array(weights)) return merged_masks ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/utils/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
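# Example usage (a minimal sketch; the toy function and inputs are
# assumptions, see multi_apply in .misc below):
#   >>> def square_and_cube(x):
#   ...     return x ** 2, x ** 3
#   >>> squares, cubes = multi_apply(square_and_cube, [1, 2, 3])
#   >>> (squares, cubes)
#   ([1, 4, 9], [1, 8, 27])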
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, reduce_mean, sync_random_seed) from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor, generate_coordinate, mask2ndarray, multi_apply, select_single_mlvl, unmap) __all__ = [ 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply', 'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict', 'center_of_mass', 'generate_coordinate', 'select_single_mlvl', 'filter_scores_and_topk', 'sync_random_seed' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/utils/dist_utils.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import functools import pickle import warnings from collections import OrderedDict import numpy as np import torch import torch.distributed as dist from mmcv.runner import OptimizerHook, get_dist_info from torch._utils import (_flatten_dense_tensors, _take_tensors, _unflatten_dense_tensors) def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1): if bucket_size_mb > 0: bucket_size_bytes = bucket_size_mb * 1024 * 1024 buckets = _take_tensors(tensors, bucket_size_bytes) else: buckets = OrderedDict() for tensor in tensors: tp = tensor.type() if tp not in buckets: buckets[tp] = [] buckets[tp].append(tensor) buckets = buckets.values() for bucket in buckets: flat_tensors = _flatten_dense_tensors(bucket) dist.all_reduce(flat_tensors) flat_tensors.div_(world_size) for tensor, synced in zip( bucket, _unflatten_dense_tensors(flat_tensors, bucket)): tensor.copy_(synced) def allreduce_grads(params, coalesce=True, bucket_size_mb=-1): """Allreduce gradients. Args: params (list[torch.Parameters]): List of parameters of a model coalesce (bool, optional): Whether allreduce parameters as a whole. Defaults to True. bucket_size_mb (int, optional): Size of bucket, the unit is MB. Defaults to -1. """ grads = [ param.grad.data for param in params if param.requires_grad and param.grad is not None ] world_size = dist.get_world_size() if coalesce: _allreduce_coalesced(grads, world_size, bucket_size_mb) else: for tensor in grads: dist.all_reduce(tensor.div_(world_size)) class DistOptimizerHook(OptimizerHook): """Deprecated optimizer hook for distributed training.""" def __init__(self, *args, **kwargs): warnings.warn('"DistOptimizerHook" is deprecated, please switch to' '"mmcv.runner.OptimizerHook".') super().__init__(*args, **kwargs) def reduce_mean(tensor): """"Obtain the mean of tensor on different GPUs.""" if not (dist.is_available() and dist.is_initialized()): return tensor tensor = tensor.clone() dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM) return tensor def obj2tensor(pyobj, device='cuda'): """Serialize picklable python object to tensor.""" storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj)) return torch.ByteTensor(storage).to(device=device) def tensor2obj(tensor): """Deserialize tensor to picklable python object.""" return pickle.loads(tensor.cpu().numpy().tobytes()) @functools.lru_cache() def _get_global_gloo_group(): """Return a process group based on gloo backend, containing all the ranks The result is cached.""" if dist.get_backend() == 'nccl': return dist.new_group(backend='gloo') else: return dist.group.WORLD def all_reduce_dict(py_dict, op='sum', group=None, to_float=True): """Apply all reduce function for python dict object. The code is modified from https://github.com/Megvii- BaseDetection/YOLOX/blob/main/yolox/utils/allreduce_norm.py. 
NOTE: make sure that py_dict in different ranks has the same keys and the values should be in the same shape. Currently only supports nccl backend. Args: py_dict (dict): Dict to be applied all reduce op. op (str): Operator, could be 'sum' or 'mean'. Default: 'sum' group (:obj:`torch.distributed.group`, optional): Distributed group, Default: None. to_float (bool): Whether to convert all values of dict to float. Default: True. Returns: OrderedDict: reduced python dict object. """ warnings.warn( 'group` is deprecated. Currently only supports NCCL backend.') _, world_size = get_dist_info() if world_size == 1: return py_dict # all reduce logic across different devices. py_key = list(py_dict.keys()) if not isinstance(py_dict, OrderedDict): py_key_tensor = obj2tensor(py_key) dist.broadcast(py_key_tensor, src=0) py_key = tensor2obj(py_key_tensor) tensor_shapes = [py_dict[k].shape for k in py_key] tensor_numels = [py_dict[k].numel() for k in py_key] if to_float: warnings.warn('Note: the "to_float" is True, you need to ' 'ensure that the behavior is reasonable.') flatten_tensor = torch.cat( [py_dict[k].flatten().float() for k in py_key]) else: flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key]) dist.all_reduce(flatten_tensor, op=dist.ReduceOp.SUM) if op == 'mean': flatten_tensor /= world_size split_tensors = [ x.reshape(shape) for x, shape in zip( torch.split(flatten_tensor, tensor_numels), tensor_shapes) ] out_dict = {k: v for k, v in zip(py_key, split_tensors)} if isinstance(py_dict, OrderedDict): out_dict = OrderedDict(out_dict) return out_dict def sync_random_seed(seed=None, device='cuda'): """Make sure different ranks share the same seed. All workers must call this function, otherwise it will deadlock. This method is generally used in `DistributedSampler`, because the seed should be identical across all processes in the distributed group. In distributed sampling, different ranks should sample non-overlapped data in the dataset. Therefore, this function is used to make sure that each rank shuffles the data indices in the same order based on the same seed. Then different ranks could use different indices to select non-overlapped data from the same data list. Args: seed (int, Optional): The seed. Default to None. device (str): The device where the seed will be put on. Default to 'cuda'. Returns: int: Seed to be used. """ if seed is None: seed = np.random.randint(2**31) assert isinstance(seed, int) rank, world_size = get_dist_info() if world_size == 1: return seed if rank == 0: random_num = torch.tensor(seed, dtype=torch.int32, device=device) else: random_num = torch.tensor(0, dtype=torch.int32, device=device) dist.broadcast(random_num, src=0) return random_num.item() ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/utils/misc.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from functools import partial import numpy as np import torch from six.moves import map, zip from ..mask.structures import BitmapMasks, PolygonMasks def multi_apply(func, *args, **kwargs): """Apply function to a list of arguments. Note: This function applies the ``func`` to multiple inputs and map the multiple outputs of the ``func`` into different list. Each list contains the same type of outputs corresponding to different inputs. 
Args: func (Function): A function that will be applied to a list of arguments Returns: tuple(list): A tuple containing multiple list, each list contains \ a kind of returned results by the function """ pfunc = partial(func, **kwargs) if kwargs else func map_results = map(pfunc, *args) return tuple(map(list, zip(*map_results))) def unmap(data, count, inds, fill=0): """Unmap a subset of item (data) back to the original set of items (of size count)""" if data.dim() == 1: ret = data.new_full((count, ), fill) ret[inds.type(torch.bool)] = data else: new_size = (count, ) + data.size()[1:] ret = data.new_full(new_size, fill) ret[inds.type(torch.bool), :] = data return ret def mask2ndarray(mask): """Convert Mask to ndarray.. Args: mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or torch.Tensor or np.ndarray): The mask to be converted. Returns: np.ndarray: Ndarray mask of shape (n, h, w) that has been converted """ if isinstance(mask, (BitmapMasks, PolygonMasks)): mask = mask.to_ndarray() elif isinstance(mask, torch.Tensor): mask = mask.detach().cpu().numpy() elif not isinstance(mask, np.ndarray): raise TypeError(f'Unsupported {type(mask)} data type') return mask def flip_tensor(src_tensor, flip_direction): """flip tensor base on flip_direction. Args: src_tensor (Tensor): input feature map, shape (B, C, H, W). flip_direction (str): The flipping direction. Options are 'horizontal', 'vertical', 'diagonal'. Returns: out_tensor (Tensor): Flipped tensor. """ assert src_tensor.ndim == 4 valid_directions = ['horizontal', 'vertical', 'diagonal'] assert flip_direction in valid_directions if flip_direction == 'horizontal': out_tensor = torch.flip(src_tensor, [3]) elif flip_direction == 'vertical': out_tensor = torch.flip(src_tensor, [2]) else: out_tensor = torch.flip(src_tensor, [2, 3]) return out_tensor def select_single_mlvl(mlvl_tensors, batch_id, detach=True): """Extract a multi-scale single image tensor from a multi-scale batch tensor based on batch index. Note: The default value of detach is True, because the proposal gradient needs to be detached during the training of the two-stage model. E.g Cascade Mask R-CNN. Args: mlvl_tensors (list[Tensor]): Batch tensor for all scale levels, each is a 4D-tensor. batch_id (int): Batch index. detach (bool): Whether detach gradient. Default True. Returns: list[Tensor]: Multi-scale single image tensor. """ assert isinstance(mlvl_tensors, (list, tuple)) num_levels = len(mlvl_tensors) if detach: mlvl_tensor_list = [ mlvl_tensors[i][batch_id].detach() for i in range(num_levels) ] else: mlvl_tensor_list = [ mlvl_tensors[i][batch_id] for i in range(num_levels) ] return mlvl_tensor_list def filter_scores_and_topk(scores, score_thr, topk, results=None): """Filter results using score threshold and topk candidates. Args: scores (Tensor): The scores, shape (num_bboxes, K). score_thr (float): The score filter threshold. topk (int): The number of topk candidates. results (dict or list or Tensor, Optional): The results to which the filtering rule is to be applied. The shape of each item is (num_bboxes, N). Returns: tuple: Filtered results - scores (Tensor): The scores after being filtered, \ shape (num_bboxes_filtered, ). - labels (Tensor): The class labels, shape \ (num_bboxes_filtered, ). - anchor_idxs (Tensor): The anchor indexes, shape \ (num_bboxes_filtered, ). - filtered_results (dict or list or Tensor, Optional): \ The filtered results. The shape of each item is \ (num_bboxes_filtered, N). 
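    Example:
        A sketch with assumed toy inputs of shape (num_bboxes, K) = (2, 2):
        >>> scores = torch.tensor([[0.9, 0.05], [0.3, 0.7]])
        >>> out_scores, labels, keep_idxs, _ = filter_scores_and_topk(
        ...     scores, 0.1, topk=2)
        >>> out_scores
        tensor([0.9000, 0.7000])
        >>> (labels, keep_idxs)
        (tensor([0, 1]), tensor([0, 1]))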
""" valid_mask = scores > score_thr scores = scores[valid_mask] valid_idxs = torch.nonzero(valid_mask) num_topk = min(topk, valid_idxs.size(0)) # torch.sort is actually faster than .topk (at least on GPUs) scores, idxs = scores.sort(descending=True) scores = scores[:num_topk] topk_idxs = valid_idxs[idxs[:num_topk]] keep_idxs, labels = topk_idxs.unbind(dim=1) filtered_results = None if results is not None: if isinstance(results, dict): filtered_results = {k: v[keep_idxs] for k, v in results.items()} elif isinstance(results, list): filtered_results = [result[keep_idxs] for result in results] elif isinstance(results, torch.Tensor): filtered_results = results[keep_idxs] else: raise NotImplementedError(f'Only supports dict or list or Tensor, ' f'but get {type(results)}.') return scores, labels, keep_idxs, filtered_results def center_of_mass(mask, esp=1e-6): """Calculate the centroid coordinates of the mask. Args: mask (Tensor): The mask to be calculated, shape (h, w). esp (float): Avoid dividing by zero. Default: 1e-6. Returns: tuple[Tensor]: the coordinates of the center point of the mask. - center_h (Tensor): the center point of the height. - center_w (Tensor): the center point of the width. """ h, w = mask.shape grid_h = torch.arange(h, device=mask.device)[:, None] grid_w = torch.arange(w, device=mask.device) normalizer = mask.sum().float().clamp(min=esp) center_h = (mask * grid_h).sum() / normalizer center_w = (mask * grid_w).sum() / normalizer return center_h, center_w def generate_coordinate(featmap_sizes, device='cuda'): """Generate the coordinate. Args: featmap_sizes (tuple): The feature to be calculated, of shape (N, C, W, H). device (str): The device where the feature will be put on. Returns: coord_feat (Tensor): The coordinate feature, of shape (N, 2, W, H). """ x_range = torch.linspace(-1, 1, featmap_sizes[-1], device=device) y_range = torch.linspace(-1, 1, featmap_sizes[-2], device=device) y, x = torch.meshgrid(y_range, x_range) y = y.expand([featmap_sizes[0], 1, -1, -1]) x = x.expand([featmap_sizes[0], 1, -1, -1]) coord_feat = torch.cat([x, y], 1) return coord_feat ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/visualization/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .image import (color_val_matplotlib, imshow_det_bboxes, imshow_gt_det_bboxes) from .palette import get_palette, palette_val __all__ = [ 'imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib', 'palette_val', 'get_palette' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/visualization/image.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import sys import cv2 import matplotlib.pyplot as plt import mmcv import numpy as np import pycocotools.mask as mask_util from matplotlib.collections import PatchCollection from matplotlib.patches import Polygon from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET from ..mask.structures import bitmap_to_polygon from ..utils import mask2ndarray from .palette import get_palette, palette_val __all__ = [ 'color_val_matplotlib', 'draw_masks', 'draw_bboxes', 'draw_labels', 'imshow_det_bboxes', 'imshow_gt_det_bboxes' ] EPS = 1e-2 def color_val_matplotlib(color): """Convert various input in BGR order to normalized RGB matplotlib color tuples. Args: color (:obj`Color` | str | tuple | int | ndarray): Color inputs. 
    Returns:
        tuple[float]: A tuple of 3 normalized floats indicating RGB channels.
    """
    color = mmcv.color_val(color)
    color = [color / 255 for color in color[::-1]]
    return tuple(color)


def _get_adaptive_scales(areas, min_area=800, max_area=30000):
    """Get adaptive scales according to areas.

    The scale range is [0.5, 1.0]. When the area is less than ``min_area``,
    the scale is 0.5; when the area is larger than ``max_area``, the scale
    is 1.0.

    Args:
        areas (ndarray): The areas of bboxes or masks with the shape of (n, ).
        min_area (int): Lower bound areas for adaptive scales. Default: 800.
        max_area (int): Upper bound areas for adaptive scales. Default: 30000.

    Returns:
        ndarray: The adaptive scales with the shape of (n, ).
    """
    scales = 0.5 + (areas - min_area) / (max_area - min_area)
    scales = np.clip(scales, 0.5, 1.0)
    return scales


def _get_bias_color(base, max_dist=30):
    """Get a different color for each mask.

    Get a different color for each mask by adding a bias color to the base
    category color.

    Args:
        base (ndarray): The base category color with the shape of (3, ).
        max_dist (int): The max distance of bias. Default: 30.

    Returns:
        ndarray: The new color for a mask with the shape of (3, ).
    """
    new_color = base + np.random.randint(
        low=-max_dist, high=max_dist + 1, size=3)
    return np.clip(new_color, 0, 255, new_color)


def draw_bboxes(ax, bboxes, color='g', alpha=0.8, thickness=2):
    """Draw bounding boxes on the axes.

    Args:
        ax (matplotlib.Axes): The input axes.
        bboxes (ndarray): The input bounding boxes with the shape of (n, 4).
        color (list[tuple] | matplotlib.color): The colors for each bounding
            box.
        alpha (float): Transparency of bounding boxes. Default: 0.8.
        thickness (int): Thickness of lines. Default: 2.

    Returns:
        matplotlib.Axes: The result axes.
    """
    polygons = []
    for i, bbox in enumerate(bboxes):
        bbox_int = bbox.astype(np.int32)
        poly = [[bbox_int[0], bbox_int[1]], [bbox_int[0], bbox_int[3]],
                [bbox_int[2], bbox_int[3]], [bbox_int[2], bbox_int[1]]]
        np_poly = np.array(poly).reshape((4, 2))
        polygons.append(Polygon(np_poly))
    p = PatchCollection(
        polygons,
        facecolor='none',
        edgecolors=color,
        linewidths=thickness,
        alpha=alpha)
    ax.add_collection(p)
    return ax


def draw_labels(ax,
                labels,
                positions,
                scores=None,
                class_names=None,
                color='w',
                font_size=8,
                scales=None,
                horizontal_alignment='left'):
    """Draw labels on the axes.

    Args:
        ax (matplotlib.Axes): The input axes.
        labels (ndarray): The labels with the shape of (n, ).
        positions (ndarray): The positions to draw each label.
        scores (ndarray): The scores for each label.
        class_names (list[str]): The class names.
        color (list[tuple] | matplotlib.color): The colors for labels.
        font_size (int): Font size of texts. Default: 8.
        scales (list[float]): Scales of texts. Default: None.
        horizontal_alignment (str): The horizontal alignment method of
            texts. Default: 'left'.

    Returns:
        matplotlib.Axes: The result axes.
""" for i, (pos, label) in enumerate(zip(positions, labels)): label_text = class_names[ label] if class_names is not None else f'class {label}' if scores is not None: label_text += f'|{scores[i]:.02f}' text_color = color[i] if isinstance(color, list) else color font_size_mask = font_size if scales is None else font_size * scales[i] ax.text( pos[0], pos[1], f'{label_text}', bbox={ 'facecolor': 'black', 'alpha': 0.8, 'pad': 0.7, 'edgecolor': 'none' }, color=text_color, fontsize=font_size_mask, verticalalignment='top', horizontalalignment=horizontal_alignment) return ax def draw_masks(ax, img, masks, color=None, with_edge=True, alpha=0.8): """Draw masks on the image and their edges on the axes. Args: ax (matplotlib.Axes): The input axes. img (ndarray): The image with the shape of (3, h, w). masks (ndarray): The masks with the shape of (n, h, w). color (ndarray): The colors for each masks with the shape of (n, 3). with_edge (bool): Whether to draw edges. Default: True. alpha (float): Transparency of bounding boxes. Default: 0.8. Returns: matplotlib.Axes: The result axes. ndarray: The result image. """ taken_colors = set([0, 0, 0]) if color is None: random_colors = np.random.randint(0, 255, (masks.size(0), 3)) color = [tuple(c) for c in random_colors] color = np.array(color, dtype=np.uint8) polygons = [] for i, mask in enumerate(masks): if with_edge: contours, _ = bitmap_to_polygon(mask) polygons += [Polygon(c) for c in contours] color_mask = color[i] while tuple(color_mask) in taken_colors: color_mask = _get_bias_color(color_mask) taken_colors.add(tuple(color_mask)) mask = mask.astype(bool) img[mask] = img[mask] * (1 - alpha) + color_mask * alpha p = PatchCollection( polygons, facecolor='none', edgecolors='w', linewidths=1, alpha=0.8) ax.add_collection(p) return ax, img def imshow_det_bboxes(img, bboxes=None, labels=None, segms=None, class_names=None, score_thr=0, bbox_color='green', text_color='green', mask_color=None, thickness=2, font_size=8, win_name='', show=True, wait_time=0, out_file=None): """Draw bboxes and class labels (with scores) on an image. Args: img (str | ndarray): The image to be displayed. bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or (n, 5). labels (ndarray): Labels of bboxes. segms (ndarray | None): Masks, shaped (n,h,w) or None. class_names (list[str]): Names of each classes. score_thr (float): Minimum score of bboxes to be shown. Default: 0. bbox_color (list[tuple] | tuple | str | None): Colors of bbox lines. If a single color is given, it will be applied to all classes. The tuple of color should be in RGB order. Default: 'green'. text_color (list[tuple] | tuple | str | None): Colors of texts. If a single color is given, it will be applied to all classes. The tuple of color should be in RGB order. Default: 'green'. mask_color (list[tuple] | tuple | str | None, optional): Colors of masks. If a single color is given, it will be applied to all classes. The tuple of color should be in RGB order. Default: None. thickness (int): Thickness of lines. Default: 2. font_size (int): Font size of texts. Default: 13. show (bool): Whether to show the image. Default: True. win_name (str): The window name. Default: ''. wait_time (float): Value of waitKey param. Default: 0. out_file (str, optional): The filename to write the image. Default: None. Returns: ndarray: The image with bboxes drawn on it. """ assert bboxes is None or bboxes.ndim == 2, \ f' bboxes ndim should be 2, but its ndim is {bboxes.ndim}.' 
assert labels.ndim == 1, \ f' labels ndim should be 1, but its ndim is {labels.ndim}.' assert bboxes is None or bboxes.shape[1] == 4 or bboxes.shape[1] == 5, \ f' bboxes.shape[1] should be 4 or 5, but its {bboxes.shape[1]}.' assert bboxes is None or bboxes.shape[0] <= labels.shape[0], \ 'labels.shape[0] should not be less than bboxes.shape[0].' assert segms is None or segms.shape[0] == labels.shape[0], \ 'segms.shape[0] and labels.shape[0] should have the same length.' assert segms is not None or bboxes is not None, \ 'segms and bboxes should not be None at the same time.' img = mmcv.imread(img).astype(np.uint8) if score_thr > 0: assert bboxes is not None and bboxes.shape[1] == 5 scores = bboxes[:, -1] inds = scores > score_thr bboxes = bboxes[inds, :] labels = labels[inds] if segms is not None: segms = segms[inds, ...] img = mmcv.bgr2rgb(img) width, height = img.shape[1], img.shape[0] img = np.ascontiguousarray(img) fig = plt.figure(win_name, frameon=False) plt.title(win_name) canvas = fig.canvas dpi = fig.get_dpi() # add a small EPS to avoid precision lost due to matplotlib's truncation # (https://github.com/matplotlib/matplotlib/issues/15363) fig.set_size_inches((width + EPS) / dpi, (height + EPS) / dpi) # remove white edges by set subplot margin plt.subplots_adjust(left=0, right=1, bottom=0, top=1) ax = plt.gca() ax.axis('off') max_label = int(max(labels) if len(labels) > 0 else 0) text_palette = palette_val(get_palette(text_color, max_label + 1)) text_colors = [text_palette[label] for label in labels] num_bboxes = 0 if bboxes is not None: num_bboxes = bboxes.shape[0] bbox_palette = palette_val(get_palette(bbox_color, max_label + 1)) colors = [bbox_palette[label] for label in labels[:num_bboxes]] draw_bboxes(ax, bboxes, colors, alpha=0.8, thickness=thickness) horizontal_alignment = 'left' positions = bboxes[:, :2].astype(np.int32) + thickness areas = (bboxes[:, 3] - bboxes[:, 1]) * (bboxes[:, 2] - bboxes[:, 0]) scales = _get_adaptive_scales(areas) scores = bboxes[:, 4] if bboxes.shape[1] == 5 else None draw_labels( ax, labels[:num_bboxes], positions, scores=scores, class_names=class_names, color=text_colors, font_size=font_size, scales=scales, horizontal_alignment=horizontal_alignment) if segms is not None: mask_palette = get_palette(mask_color, max_label + 1) colors = [mask_palette[label] for label in labels] colors = np.array(colors, dtype=np.uint8) draw_masks(ax, img, segms, colors, with_edge=True) if num_bboxes < segms.shape[0]: segms = segms[num_bboxes:] horizontal_alignment = 'center' areas = [] positions = [] for mask in segms: _, _, stats, centroids = cv2.connectedComponentsWithStats( mask.astype(np.uint8), connectivity=8) largest_id = np.argmax(stats[1:, -1]) + 1 positions.append(centroids[largest_id]) areas.append(stats[largest_id, -1]) areas = np.stack(areas, axis=0) scales = _get_adaptive_scales(areas) draw_labels( ax, labels[num_bboxes:], positions, class_names=class_names, color=text_colors, font_size=font_size, scales=scales, horizontal_alignment=horizontal_alignment) plt.imshow(img) stream, _ = canvas.print_to_buffer() buffer = np.frombuffer(stream, dtype='uint8') if sys.platform == 'darwin': width, height = canvas.get_width_height(physical=True) img_rgba = buffer.reshape(height, width, 4) rgb, alpha = np.split(img_rgba, [3], axis=2) img = rgb.astype('uint8') img = mmcv.rgb2bgr(img) if show: # We do not use cv2 for display because in some cases, opencv will # conflict with Qt, it will output a warning: Current thread # is not the object's thread. 
You can refer to # https://github.com/opencv/opencv-python/issues/46 for details if wait_time == 0: plt.show() else: plt.show(block=False) plt.pause(wait_time) if out_file is not None: mmcv.imwrite(img, out_file) plt.close() return img def imshow_gt_det_bboxes(img, annotation, result, class_names=None, score_thr=0, gt_bbox_color=(61, 102, 255), gt_text_color=(200, 200, 200), gt_mask_color=(61, 102, 255), det_bbox_color=(241, 101, 72), det_text_color=(200, 200, 200), det_mask_color=(241, 101, 72), thickness=2, font_size=13, win_name='', show=True, wait_time=0, out_file=None, overlay_gt_pred=True): """General visualization GT and result function. Args: img (str | ndarray): The image to be displayed. annotation (dict): Ground truth annotations where contain keys of 'gt_bboxes' and 'gt_labels' or 'gt_masks'. result (tuple[list] | list): The detection result, can be either (bbox, segm) or just bbox. class_names (list[str]): Names of each classes. score_thr (float): Minimum score of bboxes to be shown. Default: 0. gt_bbox_color (list[tuple] | tuple | str | None): Colors of bbox lines. If a single color is given, it will be applied to all classes. The tuple of color should be in RGB order. Default: (61, 102, 255). gt_text_color (list[tuple] | tuple | str | None): Colors of texts. If a single color is given, it will be applied to all classes. The tuple of color should be in RGB order. Default: (200, 200, 200). gt_mask_color (list[tuple] | tuple | str | None, optional): Colors of masks. If a single color is given, it will be applied to all classes. The tuple of color should be in RGB order. Default: (61, 102, 255). det_bbox_color (list[tuple] | tuple | str | None):Colors of bbox lines. If a single color is given, it will be applied to all classes. The tuple of color should be in RGB order. Default: (241, 101, 72). det_text_color (list[tuple] | tuple | str | None):Colors of texts. If a single color is given, it will be applied to all classes. The tuple of color should be in RGB order. Default: (200, 200, 200). det_mask_color (list[tuple] | tuple | str | None, optional): Color of masks. If a single color is given, it will be applied to all classes. The tuple of color should be in RGB order. Default: (241, 101, 72). thickness (int): Thickness of lines. Default: 2. font_size (int): Font size of texts. Default: 13. win_name (str): The window name. Default: ''. show (bool): Whether to show the image. Default: True. wait_time (float): Value of waitKey param. Default: 0. out_file (str, optional): The filename to write the image. Default: None. overlay_gt_pred (bool): Whether to plot gts and predictions on the same image. If False, predictions and gts will be plotted on two same image which will be concatenated in vertical direction. The image above is drawn with gt, and the image below is drawn with the prediction result. Default: True. Returns: ndarray: The image with bboxes or masks drawn on it. 
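    Example (editor's illustrative sketch; the annotation dict and the
    single fake detection below are invented for demonstration):

        >>> import numpy as np
        >>> annotation = dict(
        ...     gt_bboxes=np.array([[10., 10., 60., 80.]], dtype=np.float32),
        ...     gt_labels=np.array([0], dtype=np.int64))
        >>> # one class; each per-class array holds (x1, y1, x2, y2, score)
        >>> result = [np.array([[12., 12., 58., 77., 0.9]], dtype=np.float32)]
        >>> img = np.zeros((100, 100, 3), dtype=np.uint8)
        >>> out = imshow_gt_det_bboxes(img, annotation, result,
        ...                            class_names=['person'], show=False)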
""" assert 'gt_bboxes' in annotation assert 'gt_labels' in annotation assert isinstance(result, (tuple, list, dict)), 'Expected ' \ f'tuple or list or dict, but get {type(result)}' gt_bboxes = annotation['gt_bboxes'] gt_labels = annotation['gt_labels'] gt_masks = annotation.get('gt_masks', None) if gt_masks is not None: gt_masks = mask2ndarray(gt_masks) gt_seg = annotation.get('gt_semantic_seg', None) if gt_seg is not None: pad_value = 255 # the padding value of gt_seg sem_labels = np.unique(gt_seg) all_labels = np.concatenate((gt_labels, sem_labels), axis=0) all_labels, counts = np.unique(all_labels, return_counts=True) stuff_labels = all_labels[np.logical_and(counts < 2, all_labels != pad_value)] stuff_masks = gt_seg[None] == stuff_labels[:, None, None] gt_labels = np.concatenate((gt_labels, stuff_labels), axis=0) gt_masks = np.concatenate((gt_masks, stuff_masks.astype(np.uint8)), axis=0) # If you need to show the bounding boxes, # please comment the following line # gt_bboxes = None img = mmcv.imread(img) img_with_gt = imshow_det_bboxes( img, gt_bboxes, gt_labels, gt_masks, class_names=class_names, bbox_color=gt_bbox_color, text_color=gt_text_color, mask_color=gt_mask_color, thickness=thickness, font_size=font_size, win_name=win_name, show=False) if not isinstance(result, dict): if isinstance(result, tuple): bbox_result, segm_result = result if isinstance(segm_result, tuple): segm_result = segm_result[0] # ms rcnn else: bbox_result, segm_result = result, None bboxes = np.vstack(bbox_result) labels = [ np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result) ] labels = np.concatenate(labels) segms = None if segm_result is not None and len(labels) > 0: # non empty segms = mmcv.concat_list(segm_result) segms = mask_util.decode(segms) segms = segms.transpose(2, 0, 1) else: assert class_names is not None, 'We need to know the number ' \ 'of classes.' VOID = len(class_names) bboxes = None pan_results = result['pan_results'] # keep objects ahead ids = np.unique(pan_results)[::-1] legal_indices = ids != VOID ids = ids[legal_indices] labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64) segms = (pan_results[None] == ids[:, None, None]) if overlay_gt_pred: img = imshow_det_bboxes( img_with_gt, bboxes, labels, segms=segms, class_names=class_names, score_thr=score_thr, bbox_color=det_bbox_color, text_color=det_text_color, mask_color=det_mask_color, thickness=thickness, font_size=font_size, win_name=win_name, show=show, wait_time=wait_time, out_file=out_file) else: img_with_det = imshow_det_bboxes( img, bboxes, labels, segms=segms, class_names=class_names, score_thr=score_thr, bbox_color=det_bbox_color, text_color=det_text_color, mask_color=det_mask_color, thickness=thickness, font_size=font_size, win_name=win_name, show=False) img = np.concatenate([img_with_gt, img_with_det], axis=0) plt.imshow(img) if show: if wait_time == 0: plt.show() else: plt.show(block=False) plt.pause(wait_time) if out_file is not None: mmcv.imwrite(img, out_file) plt.close() return img ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/core/visualization/palette.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import mmcv import numpy as np def palette_val(palette): """Convert palette to matplotlib palette. Args: palette List[tuple]: A list of color tuples. Returns: List[tuple[float]]: A list of RGB matplotlib color tuples. 
""" new_palette = [] for color in palette: color = [c / 255 for c in color] new_palette.append(tuple(color)) return new_palette def get_palette(palette, num_classes): """Get palette from various inputs. Args: palette (list[tuple] | str | tuple | :obj:`Color`): palette inputs. num_classes (int): the number of classes. Returns: list[tuple[int]]: A list of color tuples. """ assert isinstance(num_classes, int) if isinstance(palette, list): dataset_palette = palette elif isinstance(palette, tuple): dataset_palette = [palette] * num_classes elif palette == 'random' or palette is None: state = np.random.get_state() # random color np.random.seed(42) palette = np.random.randint(0, 256, size=(num_classes, 3)) np.random.set_state(state) dataset_palette = [tuple(c) for c in palette] elif palette == 'coco': from mmdet.datasets import CocoDataset, CocoPanopticDataset dataset_palette = CocoDataset.PALETTE if len(dataset_palette) < num_classes: dataset_palette = CocoPanopticDataset.PALETTE elif palette == 'citys': from mmdet.datasets import CityscapesDataset dataset_palette = CityscapesDataset.PALETTE elif palette == 'voc': from mmdet.datasets import VOCDataset dataset_palette = VOCDataset.PALETTE elif mmcv.is_str(palette): dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes else: raise TypeError(f'Invalid type for palette: {type(palette)}') assert len(dataset_palette) >= num_classes, \ 'The length of palette should not be less than `num_classes`.' return dataset_palette ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .backbones import * # noqa: F401,F403 from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS, ROI_EXTRACTORS, SHARED_HEADS, build_backbone, build_detector, build_head, build_loss, build_neck, build_roi_extractor, build_shared_head) from .dense_heads import * # noqa: F401,F403 from .detectors import * # noqa: F401,F403 from .losses import * # noqa: F401,F403 from .necks import * # noqa: F401,F403 from .plugins import * # noqa: F401,F403 from .roi_heads import * # noqa: F401,F403 from .seg_heads import * # noqa: F401,F403 __all__ = [ 'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES', 'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor', 'build_shared_head', 'build_head', 'build_loss', 'build_detector' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
from .csp_darknet import CSPDarknet from .darknet import Darknet from .detectors_resnet import DetectoRS_ResNet from .detectors_resnext import DetectoRS_ResNeXt from .efficientnet import EfficientNet from .hourglass import HourglassNet from .hrnet import HRNet from .mobilenet_v2 import MobileNetV2 from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2 from .regnet import RegNet from .res2net import Res2Net from .resnest import ResNeSt from .resnet import ResNet, ResNetV1d from .resnext import ResNeXt from .ssd_vgg import SSDVGG from .swin import SwinTransformer from .trident_resnet import TridentResNet __all__ = [ 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', 'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet', 'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet', 'SwinTransformer', 'PyramidVisionTransformer', 'PyramidVisionTransformerV2', 'EfficientNet' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/csp_darknet.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import math import torch import torch.nn as nn from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule from mmcv.runner import BaseModule from torch.nn.modules.batchnorm import _BatchNorm from ..builder import BACKBONES from ..utils import CSPLayer class Focus(nn.Module): """Focus width and height information into channel space. Args: in_channels (int): The input channels of this Module. out_channels (int): The output channels of this Module. kernel_size (int): The kernel size of the convolution. Default: 1 stride (int): The stride of the convolution. Default: 1 conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN', momentum=0.03, eps=0.001). act_cfg (dict): Config dict for activation layer. Default: dict(type='Swish'). """ def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, conv_cfg=None, norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish')): super().__init__() self.conv = ConvModule( in_channels * 4, out_channels, kernel_size, stride, padding=(kernel_size - 1) // 2, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) def forward(self, x): # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2) patch_top_left = x[..., ::2, ::2] patch_top_right = x[..., ::2, 1::2] patch_bot_left = x[..., 1::2, ::2] patch_bot_right = x[..., 1::2, 1::2] x = torch.cat( ( patch_top_left, patch_bot_left, patch_top_right, patch_bot_right, ), dim=1, ) return self.conv(x) class SPPBottleneck(BaseModule): """Spatial pyramid pooling layer used in YOLOv3-SPP. Args: in_channels (int): The input channels of this Module. out_channels (int): The output channels of this Module. kernel_sizes (tuple[int]): Sequential of kernel sizes of pooling layers. Default: (5, 9, 13). conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='Swish'). init_cfg (dict or list[dict], optional): Initialization config dict. Default: None. 
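    Example (editor's illustrative sketch; the output channel count follows
    from ``mid_channels * (len(kernel_sizes) + 1)`` feeding a 1x1 conv):

        >>> import torch
        >>> spp = SPPBottleneck(64, 128).eval()
        >>> out = spp(torch.rand(1, 64, 32, 32))
        >>> tuple(out.shape)
        (1, 128, 32, 32)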
""" def __init__(self, in_channels, out_channels, kernel_sizes=(5, 9, 13), conv_cfg=None, norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish'), init_cfg=None): super().__init__(init_cfg) mid_channels = in_channels // 2 self.conv1 = ConvModule( in_channels, mid_channels, 1, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.poolings = nn.ModuleList([ nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2) for ks in kernel_sizes ]) conv2_channels = mid_channels * (len(kernel_sizes) + 1) self.conv2 = ConvModule( conv2_channels, out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) def forward(self, x): x = self.conv1(x) x = torch.cat([x] + [pooling(x) for pooling in self.poolings], dim=1) x = self.conv2(x) return x @BACKBONES.register_module() class CSPDarknet(BaseModule): """CSP-Darknet backbone used in YOLOv5 and YOLOX. Args: arch (str): Architecture of CSP-Darknet, from {P5, P6}. Default: P5. deepen_factor (float): Depth multiplier, multiply number of blocks in CSP layer by this amount. Default: 1.0. widen_factor (float): Width multiplier, multiply number of channels in each layer by this amount. Default: 1.0. out_indices (Sequence[int]): Output from which stages. Default: (2, 3, 4). frozen_stages (int): Stages to be frozen (stop grad and set eval mode). -1 means not freezing any parameters. Default: -1. use_depthwise (bool): Whether to use depthwise separable convolution. Default: False. arch_ovewrite(list): Overwrite default arch settings. Default: None. spp_kernal_sizes: (tuple[int]): Sequential of kernel sizes of SPP layers. Default: (5, 9, 13). conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True). act_cfg (dict): Config dict for activation layer. Default: dict(type='LeakyReLU', negative_slope=0.1). norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None. Example: >>> from mmdet.models import CSPDarknet >>> import torch >>> self = CSPDarknet(depth=53) >>> self.eval() >>> inputs = torch.rand(1, 3, 416, 416) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... print(tuple(level_out.shape)) ... 
(1, 256, 52, 52) (1, 512, 26, 26) (1, 1024, 13, 13) """ # From left to right: # in_channels, out_channels, num_blocks, add_identity, use_spp arch_settings = { 'P5': [[64, 128, 3, True, False], [128, 256, 9, True, False], [256, 512, 9, True, False], [512, 1024, 3, False, True]], 'P6': [[64, 128, 3, True, False], [128, 256, 9, True, False], [256, 512, 9, True, False], [512, 768, 3, True, False], [768, 1024, 3, False, True]] } def __init__(self, arch='P5', deepen_factor=1.0, widen_factor=1.0, out_indices=(2, 3, 4), frozen_stages=-1, use_depthwise=False, arch_ovewrite=None, spp_kernal_sizes=(5, 9, 13), conv_cfg=None, norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish'), norm_eval=False, init_cfg=dict( type='Kaiming', layer='Conv2d', a=math.sqrt(5), distribution='uniform', mode='fan_in', nonlinearity='leaky_relu')): super().__init__(init_cfg) arch_setting = self.arch_settings[arch] if arch_ovewrite: arch_setting = arch_ovewrite assert set(out_indices).issubset( i for i in range(len(arch_setting) + 1)) if frozen_stages not in range(-1, len(arch_setting) + 1): raise ValueError('frozen_stages must be in range(-1, ' 'len(arch_setting) + 1). But received ' f'{frozen_stages}') self.out_indices = out_indices self.frozen_stages = frozen_stages self.use_depthwise = use_depthwise self.norm_eval = norm_eval conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule self.stem = Focus( 3, int(arch_setting[0][0] * widen_factor), kernel_size=3, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.layers = ['stem'] for i, (in_channels, out_channels, num_blocks, add_identity, use_spp) in enumerate(arch_setting): in_channels = int(in_channels * widen_factor) out_channels = int(out_channels * widen_factor) num_blocks = max(round(num_blocks * deepen_factor), 1) stage = [] conv_layer = conv( in_channels, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) stage.append(conv_layer) if use_spp: spp = SPPBottleneck( out_channels, out_channels, kernel_sizes=spp_kernal_sizes, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) stage.append(spp) csp_layer = CSPLayer( out_channels, out_channels, num_blocks=num_blocks, add_identity=add_identity, use_depthwise=use_depthwise, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) stage.append(csp_layer) self.add_module(f'stage{i + 1}', nn.Sequential(*stage)) self.layers.append(f'stage{i + 1}') def _freeze_stages(self): if self.frozen_stages >= 0: for i in range(self.frozen_stages + 1): m = getattr(self, self.layers[i]) m.eval() for param in m.parameters(): param.requires_grad = False def train(self, mode=True): super(CSPDarknet, self).train(mode) self._freeze_stages() if mode and self.norm_eval: for m in self.modules(): if isinstance(m, _BatchNorm): m.eval() def forward(self, x): outs = [] for i, layer_name in enumerate(self.layers): layer = getattr(self, layer_name) x = layer(x) if i in self.out_indices: outs.append(x) return tuple(outs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/darknet.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. # Copyright (c) 2019 Western Digital Corporation or its affiliates. import warnings import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from torch.nn.modules.batchnorm import _BatchNorm from ..builder import BACKBONES class ResBlock(BaseModule): """The basic residual block used in Darknet. 
Each ResBlock consists of two ConvModules and the input is added to the final output. Each ConvModule is composed of Conv, BN, and LeakyReLU. In YoloV3 paper, the first convLayer has half of the number of the filters as much as the second convLayer. The first convLayer has filter size of 1x1 and the second one has the filter size of 3x3. Args: in_channels (int): The input channels. Must be even. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True) act_cfg (dict): Config dict for activation layer. Default: dict(type='LeakyReLU', negative_slope=0.1). init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, in_channels, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), init_cfg=None): super(ResBlock, self).__init__(init_cfg) assert in_channels % 2 == 0 # ensure the in_channels is even half_in_channels = in_channels // 2 # shortcut cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.conv1 = ConvModule(in_channels, half_in_channels, 1, **cfg) self.conv2 = ConvModule( half_in_channels, in_channels, 3, padding=1, **cfg) def forward(self, x): residual = x out = self.conv1(x) out = self.conv2(out) out = out + residual return out @BACKBONES.register_module() class Darknet(BaseModule): """Darknet backbone. Args: depth (int): Depth of Darknet. Currently only support 53. out_indices (Sequence[int]): Output from which stages. frozen_stages (int): Stages to be frozen (stop grad and set eval mode). -1 means not freezing any parameters. Default: -1. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True) act_cfg (dict): Config dict for activation layer. Default: dict(type='LeakyReLU', negative_slope=0.1). norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. pretrained (str, optional): model pretrained path. Default: None init_cfg (dict or list[dict], optional): Initialization config dict. Default: None Example: >>> from mmdet.models import Darknet >>> import torch >>> self = Darknet(depth=53) >>> self.eval() >>> inputs = torch.rand(1, 3, 416, 416) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... print(tuple(level_out.shape)) ... 
(1, 256, 52, 52) (1, 512, 26, 26) (1, 1024, 13, 13) """ # Dict(depth: (layers, channels)) arch_settings = { 53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512), (512, 1024))) } def __init__(self, depth=53, out_indices=(3, 4, 5), frozen_stages=-1, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), norm_eval=True, pretrained=None, init_cfg=None): super(Darknet, self).__init__(init_cfg) if depth not in self.arch_settings: raise KeyError(f'invalid depth {depth} for darknet') self.depth = depth self.out_indices = out_indices self.frozen_stages = frozen_stages self.layers, self.channels = self.arch_settings[depth] cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg) self.cr_blocks = ['conv1'] for i, n_layers in enumerate(self.layers): layer_name = f'conv_res_block{i + 1}' in_c, out_c = self.channels[i] self.add_module( layer_name, self.make_conv_res_block(in_c, out_c, n_layers, **cfg)) self.cr_blocks.append(layer_name) self.norm_eval = norm_eval assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be specified at the same time' if isinstance(pretrained, str): warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) elif pretrained is None: if init_cfg is None: self.init_cfg = [ dict(type='Kaiming', layer='Conv2d'), dict( type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) ] else: raise TypeError('pretrained must be a str or None') def forward(self, x): outs = [] for i, layer_name in enumerate(self.cr_blocks): cr_block = getattr(self, layer_name) x = cr_block(x) if i in self.out_indices: outs.append(x) return tuple(outs) def _freeze_stages(self): if self.frozen_stages >= 0: for i in range(self.frozen_stages): m = getattr(self, self.cr_blocks[i]) m.eval() for param in m.parameters(): param.requires_grad = False def train(self, mode=True): super(Darknet, self).train(mode) self._freeze_stages() if mode and self.norm_eval: for m in self.modules(): if isinstance(m, _BatchNorm): m.eval() @staticmethod def make_conv_res_block(in_channels, out_channels, res_repeat, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1)): """In Darknet backbone, ConvLayer is usually followed by ResBlock. This function will make that. The Conv layers always have 3x3 filters with stride=2. The number of the filters in Conv layer is the same as the out channels of the ResBlock. Args: in_channels (int): The number of input channels. out_channels (int): The number of output channels. res_repeat (int): The number of ResBlocks. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True) act_cfg (dict): Config dict for activation layer. Default: dict(type='LeakyReLU', negative_slope=0.1). """ cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) model = nn.Sequential() model.add_module( 'conv', ConvModule( in_channels, out_channels, 3, stride=2, padding=1, **cfg)) for idx in range(res_repeat): model.add_module('res{}'.format(idx), ResBlock(out_channels, **cfg)) return model ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/detectors_resnet.py ================================================ # Copyright (c) OpenMMLab. 
All rights reserved. import torch.nn as nn import torch.utils.checkpoint as cp from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init, kaiming_init) from mmcv.runner import Sequential, load_checkpoint from torch.nn.modules.batchnorm import _BatchNorm from mmdet.utils import get_root_logger from ..builder import BACKBONES from .resnet import BasicBlock from .resnet import Bottleneck as _Bottleneck from .resnet import ResNet class Bottleneck(_Bottleneck): r"""Bottleneck for the ResNet backbone in `DetectoRS `_. This bottleneck allows the users to specify whether to use SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid). Args: inplanes (int): The number of input channels. planes (int): The number of output channels before expansion. rfp_inplanes (int, optional): The number of channels from RFP. Default: None. If specified, an additional conv layer will be added for ``rfp_feat``. Otherwise, the structure is the same as base class. sac (dict, optional): Dictionary to construct SAC. Default: None. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ expansion = 4 def __init__(self, inplanes, planes, rfp_inplanes=None, sac=None, init_cfg=None, **kwargs): super(Bottleneck, self).__init__( inplanes, planes, init_cfg=init_cfg, **kwargs) assert sac is None or isinstance(sac, dict) self.sac = sac self.with_sac = sac is not None if self.with_sac: self.conv2 = build_conv_layer( self.sac, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False) self.rfp_inplanes = rfp_inplanes if self.rfp_inplanes: self.rfp_conv = build_conv_layer( None, self.rfp_inplanes, planes * self.expansion, 1, stride=1, bias=True) if init_cfg is None: self.init_cfg = dict( type='Constant', val=0, override=dict(name='rfp_conv')) def rfp_forward(self, x, rfp_feat): """The forward function that also takes the RFP features as input.""" def _inner_forward(x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv1_plugin_names) out = self.conv2(out) out = self.norm2(out) out = self.relu(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv2_plugin_names) out = self.conv3(out) out = self.norm3(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv3_plugin_names) if self.downsample is not None: identity = self.downsample(x) out += identity return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) if self.rfp_inplanes: rfp_feat = self.rfp_conv(rfp_feat) out = out + rfp_feat out = self.relu(out) return out class ResLayer(Sequential): """ResLayer to build ResNet style backbone for RPF in detectoRS. The difference between this module and base class is that we pass ``rfp_inplanes`` to the first block. Args: block (nn.Module): block used to build ResLayer. inplanes (int): inplanes of block. planes (int): planes of block. num_blocks (int): number of blocks. stride (int): stride of the first block. Default: 1 avg_down (bool): Use AvgPool instead of stride conv when downsampling in the bottleneck. Default: False conv_cfg (dict): dictionary to construct and config conv layer. Default: None norm_cfg (dict): dictionary to construct and config norm layer. Default: dict(type='BN') downsample_first (bool): Downsample at the first block or last block. False for Hourglass, True for ResNet. 
Default: True rfp_inplanes (int, optional): The number of channels from RFP. Default: None. If specified, an additional conv layer will be added for ``rfp_feat``. Otherwise, the structure is the same as base class. """ def __init__(self, block, inplanes, planes, num_blocks, stride=1, avg_down=False, conv_cfg=None, norm_cfg=dict(type='BN'), downsample_first=True, rfp_inplanes=None, **kwargs): self.block = block assert downsample_first, f'downsample_first={downsample_first} is ' \ 'not supported in DetectoRS' downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = [] conv_stride = stride if avg_down and stride != 1: conv_stride = 1 downsample.append( nn.AvgPool2d( kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False)) downsample.extend([ build_conv_layer( conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=conv_stride, bias=False), build_norm_layer(norm_cfg, planes * block.expansion)[1] ]) downsample = nn.Sequential(*downsample) layers = [] layers.append( block( inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, rfp_inplanes=rfp_inplanes, **kwargs)) inplanes = planes * block.expansion for _ in range(1, num_blocks): layers.append( block( inplanes=inplanes, planes=planes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) super(ResLayer, self).__init__(*layers) @BACKBONES.register_module() class DetectoRS_ResNet(ResNet): """ResNet backbone for DetectoRS. Args: sac (dict, optional): Dictionary to construct SAC (Switchable Atrous Convolution). Default: None. stage_with_sac (list): Which stage to use sac. Default: (False, False, False, False). rfp_inplanes (int, optional): The number of channels from RFP. Default: None. If specified, an additional conv layer will be added for ``rfp_feat``. Otherwise, the structure is the same as base class. output_img (bool): If ``True``, the input image will be inserted into the starting position of output. Default: False. 
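    Example (editor's illustrative sketch; the values loosely mirror the
    DetectoRS configs and are not a tested snippet):

        >>> from mmdet.models import build_backbone
        >>> cfg = dict(
        ...     type='DetectoRS_ResNet',
        ...     depth=50,
        ...     num_stages=4,
        ...     out_indices=(0, 1, 2, 3),
        ...     conv_cfg=dict(type='ConvAWS'),
        ...     sac=dict(type='SAC', use_deform=False),
        ...     stage_with_sac=(False, True, True, True))
        >>> backbone = build_backbone(cfg)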
""" arch_settings = { 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3)) } def __init__(self, sac=None, stage_with_sac=(False, False, False, False), rfp_inplanes=None, output_img=False, pretrained=None, init_cfg=None, **kwargs): assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be specified at the same time' self.pretrained = pretrained if init_cfg is not None: assert isinstance(init_cfg, dict), \ f'init_cfg must be a dict, but got {type(init_cfg)}' if 'type' in init_cfg: assert init_cfg.get('type') == 'Pretrained', \ 'Only can initialize module by loading a pretrained model' else: raise KeyError('`init_cfg` must contain the key "type"') self.pretrained = init_cfg.get('checkpoint') self.sac = sac self.stage_with_sac = stage_with_sac self.rfp_inplanes = rfp_inplanes self.output_img = output_img super(DetectoRS_ResNet, self).__init__(**kwargs) self.inplanes = self.stem_channels self.res_layers = [] for i, num_blocks in enumerate(self.stage_blocks): stride = self.strides[i] dilation = self.dilations[i] dcn = self.dcn if self.stage_with_dcn[i] else None sac = self.sac if self.stage_with_sac[i] else None if self.plugins is not None: stage_plugins = self.make_stage_plugins(self.plugins, i) else: stage_plugins = None planes = self.base_channels * 2**i res_layer = self.make_res_layer( block=self.block, inplanes=self.inplanes, planes=planes, num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, sac=sac, rfp_inplanes=rfp_inplanes if i > 0 else None, plugins=stage_plugins) self.inplanes = planes * self.block.expansion layer_name = f'layer{i + 1}' self.add_module(layer_name, res_layer) self.res_layers.append(layer_name) self._freeze_stages() # In order to be properly initialized by RFP def init_weights(self): # Calling this method will cause parameter initialization exception # super(DetectoRS_ResNet, self).init_weights() if isinstance(self.pretrained, str): logger = get_root_logger() load_checkpoint(self, self.pretrained, strict=False, logger=logger) elif self.pretrained is None: for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_init(m) elif isinstance(m, (_BatchNorm, nn.GroupNorm)): constant_init(m, 1) if self.dcn is not None: for m in self.modules(): if isinstance(m, Bottleneck) and hasattr( m.conv2, 'conv_offset'): constant_init(m.conv2.conv_offset, 0) if self.zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck): constant_init(m.norm3, 0) elif isinstance(m, BasicBlock): constant_init(m.norm2, 0) else: raise TypeError('pretrained must be a str or None') def make_res_layer(self, **kwargs): """Pack all blocks in a stage into a ``ResLayer`` for DetectoRS.""" return ResLayer(**kwargs) def forward(self, x): """Forward function.""" outs = list(super(DetectoRS_ResNet, self).forward(x)) if self.output_img: outs.insert(0, x) return tuple(outs) def rfp_forward(self, x, rfp_feats): """Forward function for RFP.""" if self.deep_stem: x = self.stem(x) else: x = self.conv1(x) x = self.norm1(x) x = self.relu(x) x = self.maxpool(x) outs = [] for i, layer_name in enumerate(self.res_layers): res_layer = getattr(self, layer_name) rfp_feat = rfp_feats[i] if i > 0 else None for layer in res_layer: x = layer.rfp_forward(x, rfp_feat) if i in self.out_indices: outs.append(x) return tuple(outs) ================================================ FILE: 
DLTA_AI_app/mmdetection/mmdet/models/backbones/detectors_resnext.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import math from mmcv.cnn import build_conv_layer, build_norm_layer from ..builder import BACKBONES from .detectors_resnet import Bottleneck as _Bottleneck from .detectors_resnet import DetectoRS_ResNet class Bottleneck(_Bottleneck): expansion = 4 def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, **kwargs): """Bottleneck block for ResNeXt. If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is "caffe", the stride-two layer is the first 1x1 conv layer. """ super(Bottleneck, self).__init__(inplanes, planes, **kwargs) if groups == 1: width = self.planes else: width = math.floor(self.planes * (base_width / base_channels)) * groups self.norm1_name, norm1 = build_norm_layer( self.norm_cfg, width, postfix=1) self.norm2_name, norm2 = build_norm_layer( self.norm_cfg, width, postfix=2) self.norm3_name, norm3 = build_norm_layer( self.norm_cfg, self.planes * self.expansion, postfix=3) self.conv1 = build_conv_layer( self.conv_cfg, self.inplanes, width, kernel_size=1, stride=self.conv1_stride, bias=False) self.add_module(self.norm1_name, norm1) fallback_on_stride = False self.with_modulated_dcn = False if self.with_dcn: fallback_on_stride = self.dcn.pop('fallback_on_stride', False) if self.with_sac: self.conv2 = build_conv_layer( self.sac, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False) elif not self.with_dcn or fallback_on_stride: self.conv2 = build_conv_layer( self.conv_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False) else: assert self.conv_cfg is None, 'conv_cfg must be None for DCN' self.conv2 = build_conv_layer( self.dcn, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False) self.add_module(self.norm2_name, norm2) self.conv3 = build_conv_layer( self.conv_cfg, width, self.planes * self.expansion, kernel_size=1, bias=False) self.add_module(self.norm3_name, norm3) @BACKBONES.register_module() class DetectoRS_ResNeXt(DetectoRS_ResNet): """ResNeXt backbone for DetectoRS. Args: groups (int): The number of groups in ResNeXt. base_width (int): The base width of ResNeXt. """ arch_settings = { 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3)) } def __init__(self, groups=1, base_width=4, **kwargs): self.groups = groups self.base_width = base_width super(DetectoRS_ResNeXt, self).__init__(**kwargs) def make_res_layer(self, **kwargs): return super().make_res_layer( groups=self.groups, base_width=self.base_width, base_channels=self.base_channels, **kwargs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/efficientnet.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import copy import math from functools import partial import torch import torch.nn as nn import torch.utils.checkpoint as cp from mmcv.cnn.bricks import ConvModule, DropPath from mmcv.runner import BaseModule, Sequential from ..builder import BACKBONES from ..utils import InvertedResidual, SELayer, make_divisible class EdgeResidual(BaseModule): """Edge Residual Block. Args: in_channels (int): The input channels of this module. 
out_channels (int): The output channels of this module. mid_channels (int): The input channels of the second convolution. kernel_size (int): The kernel size of the first convolution. Defaults to 3. stride (int): The stride of the first convolution. Defaults to 1. se_cfg (dict, optional): Config dict for se layer. Defaults to None, which means no se layer. with_residual (bool): Use residual connection. Defaults to True. conv_cfg (dict, optional): Config dict for convolution layer. Defaults to None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Defaults to ``dict(type='BN')``. act_cfg (dict): Config dict for activation layer. Defaults to ``dict(type='ReLU')``. drop_path_rate (float): stochastic depth rate. Defaults to 0. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Defaults to False. init_cfg (dict | list[dict], optional): Initialization config dict. """ def __init__(self, in_channels, out_channels, mid_channels, kernel_size=3, stride=1, se_cfg=None, with_residual=True, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), drop_path_rate=0., with_cp=False, init_cfg=None, **kwargs): super(EdgeResidual, self).__init__(init_cfg=init_cfg) assert stride in [1, 2] self.with_cp = with_cp self.drop_path = DropPath( drop_path_rate) if drop_path_rate > 0 else nn.Identity() self.with_se = se_cfg is not None self.with_residual = ( stride == 1 and in_channels == out_channels and with_residual) if self.with_se: assert isinstance(se_cfg, dict) self.conv1 = ConvModule( in_channels=in_channels, out_channels=mid_channels, kernel_size=kernel_size, stride=1, padding=kernel_size // 2, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) if self.with_se: self.se = SELayer(**se_cfg) self.conv2 = ConvModule( in_channels=mid_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None) def forward(self, x): def _inner_forward(x): out = x out = self.conv1(out) if self.with_se: out = self.se(out) out = self.conv2(out) if self.with_residual: return x + self.drop_path(out) else: return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) return out def model_scaling(layer_setting, arch_setting): """Scaling operation to the layer's parameters according to the arch_setting.""" # scale width new_layer_setting = copy.deepcopy(layer_setting) for layer_cfg in new_layer_setting: for block_cfg in layer_cfg: block_cfg[1] = make_divisible(block_cfg[1] * arch_setting[0], 8) # scale depth split_layer_setting = [new_layer_setting[0]] for layer_cfg in new_layer_setting[1:-1]: tmp_index = [0] for i in range(len(layer_cfg) - 1): if layer_cfg[i + 1][1] != layer_cfg[i][1]: tmp_index.append(i + 1) tmp_index.append(len(layer_cfg)) for i in range(len(tmp_index) - 1): split_layer_setting.append(layer_cfg[tmp_index[i]:tmp_index[i + 1]]) split_layer_setting.append(new_layer_setting[-1]) num_of_layers = [len(layer_cfg) for layer_cfg in split_layer_setting[1:-1]] new_layers = [ int(math.ceil(arch_setting[1] * num)) for num in num_of_layers ] merge_layer_setting = [split_layer_setting[0]] for i, layer_cfg in enumerate(split_layer_setting[1:-1]): if new_layers[i] <= num_of_layers[i]: tmp_layer_cfg = layer_cfg[:new_layers[i]] else: tmp_layer_cfg = copy.deepcopy(layer_cfg) + [layer_cfg[-1]] * ( new_layers[i] - num_of_layers[i]) if tmp_layer_cfg[0][3] == 1 and i != 0: merge_layer_setting[-1] += 
tmp_layer_cfg.copy() else: merge_layer_setting.append(tmp_layer_cfg.copy()) merge_layer_setting.append(split_layer_setting[-1]) return merge_layer_setting @BACKBONES.register_module() class EfficientNet(BaseModule): """EfficientNet backbone. Args: arch (str): Architecture of efficientnet. Defaults to b0. out_indices (Sequence[int]): Output from which stages. Defaults to (6, ). frozen_stages (int): Stages to be frozen (all param fixed). Defaults to 0, which means not freezing any parameters. conv_cfg (dict): Config dict for convolution layer. Defaults to None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Defaults to dict(type='BN'). act_cfg (dict): Config dict for activation layer. Defaults to dict(type='Swish'). norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. Defaults to False. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Defaults to False. """ # Parameters to build layers. # 'b' represents the architecture of normal EfficientNet family includes # 'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8'. # 'e' represents the architecture of EfficientNet-EdgeTPU including 'es', # 'em', 'el'. # 6 parameters are needed to construct a layer, From left to right: # - kernel_size: The kernel size of the block # - out_channel: The number of out_channels of the block # - se_ratio: The sequeeze ratio of SELayer. # - stride: The stride of the block # - expand_ratio: The expand_ratio of the mid_channels # - block_type: -1: Not a block, 0: InvertedResidual, 1: EdgeResidual layer_settings = { 'b': [[[3, 32, 0, 2, 0, -1]], [[3, 16, 4, 1, 1, 0]], [[3, 24, 4, 2, 6, 0], [3, 24, 4, 1, 6, 0]], [[5, 40, 4, 2, 6, 0], [5, 40, 4, 1, 6, 0]], [[3, 80, 4, 2, 6, 0], [3, 80, 4, 1, 6, 0], [3, 80, 4, 1, 6, 0], [5, 112, 4, 1, 6, 0], [5, 112, 4, 1, 6, 0], [5, 112, 4, 1, 6, 0]], [[5, 192, 4, 2, 6, 0], [5, 192, 4, 1, 6, 0], [5, 192, 4, 1, 6, 0], [5, 192, 4, 1, 6, 0], [3, 320, 4, 1, 6, 0]], [[1, 1280, 0, 1, 0, -1]] ], 'e': [[[3, 32, 0, 2, 0, -1]], [[3, 24, 0, 1, 3, 1]], [[3, 32, 0, 2, 8, 1], [3, 32, 0, 1, 8, 1]], [[3, 48, 0, 2, 8, 1], [3, 48, 0, 1, 8, 1], [3, 48, 0, 1, 8, 1], [3, 48, 0, 1, 8, 1]], [[5, 96, 0, 2, 8, 0], [5, 96, 0, 1, 8, 0], [5, 96, 0, 1, 8, 0], [5, 96, 0, 1, 8, 0], [5, 96, 0, 1, 8, 0], [5, 144, 0, 1, 8, 0], [5, 144, 0, 1, 8, 0], [5, 144, 0, 1, 8, 0], [5, 144, 0, 1, 8, 0]], [[5, 192, 0, 2, 8, 0], [5, 192, 0, 1, 8, 0]], [[1, 1280, 0, 1, 0, -1]] ] } # yapf: disable # Parameters to build different kinds of architecture. # From left to right: scaling factor for width, scaling factor for depth, # resolution. 
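    # Editor's note (illustrative): for 'b1', the width factor 1.0 leaves the
    # channel counts of layer_settings['b'] unchanged, while the depth factor
    # 1.1 rounds per-stage block counts up via math.ceil in model_scaling(),
    # e.g. a 4-block stage becomes ceil(4 * 1.1) = 5 blocks.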
arch_settings = { 'b0': (1.0, 1.0, 224), 'b1': (1.0, 1.1, 240), 'b2': (1.1, 1.2, 260), 'b3': (1.2, 1.4, 300), 'b4': (1.4, 1.8, 380), 'b5': (1.6, 2.2, 456), 'b6': (1.8, 2.6, 528), 'b7': (2.0, 3.1, 600), 'b8': (2.2, 3.6, 672), 'es': (1.0, 1.0, 224), 'em': (1.0, 1.1, 240), 'el': (1.2, 1.4, 300) } def __init__(self, arch='b0', drop_path_rate=0., out_indices=(6, ), frozen_stages=0, conv_cfg=dict(type='Conv2dAdaptivePadding'), norm_cfg=dict(type='BN', eps=1e-3), act_cfg=dict(type='Swish'), norm_eval=False, with_cp=False, init_cfg=[ dict(type='Kaiming', layer='Conv2d'), dict( type='Constant', layer=['_BatchNorm', 'GroupNorm'], val=1) ]): super(EfficientNet, self).__init__(init_cfg) assert arch in self.arch_settings, \ f'"{arch}" is not one of the arch_settings ' \ f'({", ".join(self.arch_settings.keys())})' self.arch_setting = self.arch_settings[arch] self.layer_setting = self.layer_settings[arch[:1]] for index in out_indices: if index not in range(0, len(self.layer_setting)): raise ValueError('the item in out_indices must in ' f'range(0, {len(self.layer_setting)}). ' f'But received {index}') if frozen_stages not in range(len(self.layer_setting) + 1): raise ValueError('frozen_stages must be in range(0, ' f'{len(self.layer_setting) + 1}). ' f'But received {frozen_stages}') self.drop_path_rate = drop_path_rate self.out_indices = out_indices self.frozen_stages = frozen_stages self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.norm_eval = norm_eval self.with_cp = with_cp self.layer_setting = model_scaling(self.layer_setting, self.arch_setting) block_cfg_0 = self.layer_setting[0][0] block_cfg_last = self.layer_setting[-1][0] self.in_channels = make_divisible(block_cfg_0[1], 8) self.out_channels = block_cfg_last[1] self.layers = nn.ModuleList() self.layers.append( ConvModule( in_channels=3, out_channels=self.in_channels, kernel_size=block_cfg_0[0], stride=block_cfg_0[3], padding=block_cfg_0[0] // 2, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) self.make_layer() # Avoid building unused layers in mmdetection. if len(self.layers) < max(self.out_indices) + 1: self.layers.append( ConvModule( in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=block_cfg_last[0], stride=block_cfg_last[3], padding=block_cfg_last[0] // 2, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) def make_layer(self): # Without the first and the final conv block. layer_setting = self.layer_setting[1:-1] total_num_blocks = sum([len(x) for x in layer_setting]) block_idx = 0 dpr = [ x.item() for x in torch.linspace(0, self.drop_path_rate, total_num_blocks) ] # stochastic depth decay rule for i, layer_cfg in enumerate(layer_setting): # Avoid building unused layers in mmdetection. if i > max(self.out_indices) - 1: break layer = [] for i, block_cfg in enumerate(layer_cfg): (kernel_size, out_channels, se_ratio, stride, expand_ratio, block_type) = block_cfg mid_channels = int(self.in_channels * expand_ratio) out_channels = make_divisible(out_channels, 8) if se_ratio <= 0: se_cfg = None else: # In mmdetection, the `divisor` is deleted to align # the logic of SELayer with mmcls. 
se_cfg = dict( channels=mid_channels, ratio=expand_ratio * se_ratio, act_cfg=(self.act_cfg, dict(type='Sigmoid'))) if block_type == 1: # edge tpu if i > 0 and expand_ratio == 3: with_residual = False expand_ratio = 4 else: with_residual = True mid_channels = int(self.in_channels * expand_ratio) if se_cfg is not None: # In mmdetection, the `divisor` is deleted to align # the logic of SELayer with mmcls. se_cfg = dict( channels=mid_channels, ratio=se_ratio * expand_ratio, act_cfg=(self.act_cfg, dict(type='Sigmoid'))) block = partial(EdgeResidual, with_residual=with_residual) else: block = InvertedResidual layer.append( block( in_channels=self.in_channels, out_channels=out_channels, mid_channels=mid_channels, kernel_size=kernel_size, stride=stride, se_cfg=se_cfg, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, drop_path_rate=dpr[block_idx], with_cp=self.with_cp, # In mmdetection, `with_expand_conv` is set to align # the logic of InvertedResidual with mmcls. with_expand_conv=(mid_channels != self.in_channels))) self.in_channels = out_channels block_idx += 1 self.layers.append(Sequential(*layer)) def forward(self, x): outs = [] for i, layer in enumerate(self.layers): x = layer(x) if i in self.out_indices: outs.append(x) return tuple(outs) def _freeze_stages(self): for i in range(self.frozen_stages): m = self.layers[i] m.eval() for param in m.parameters(): param.requires_grad = False def train(self, mode=True): super(EfficientNet, self).train(mode) self._freeze_stages() if mode and self.norm_eval: for m in self.modules(): if isinstance(m, nn.BatchNorm2d): m.eval() ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/hourglass.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from ..builder import BACKBONES from ..utils import ResLayer from .resnet import BasicBlock class HourglassModule(BaseModule): """Hourglass Module for HourglassNet backbone. Generate module recursively and use BasicBlock as the base unit. Args: depth (int): Depth of current HourglassModule. stage_channels (list[int]): Feature channels of sub-modules in current and follow-up HourglassModule. stage_blocks (list[int]): Number of sub-modules stacked in current and follow-up HourglassModule. norm_cfg (dict): Dictionary to construct and config norm layer. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None upsample_cfg (dict, optional): Config dict for interpolate layer. 
Default: `dict(mode='nearest')` """ def __init__(self, depth, stage_channels, stage_blocks, norm_cfg=dict(type='BN', requires_grad=True), init_cfg=None, upsample_cfg=dict(mode='nearest')): super(HourglassModule, self).__init__(init_cfg) self.depth = depth cur_block = stage_blocks[0] next_block = stage_blocks[1] cur_channel = stage_channels[0] next_channel = stage_channels[1] self.up1 = ResLayer( BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg) self.low1 = ResLayer( BasicBlock, cur_channel, next_channel, cur_block, stride=2, norm_cfg=norm_cfg) if self.depth > 1: self.low2 = HourglassModule(depth - 1, stage_channels[1:], stage_blocks[1:]) else: self.low2 = ResLayer( BasicBlock, next_channel, next_channel, next_block, norm_cfg=norm_cfg) self.low3 = ResLayer( BasicBlock, next_channel, cur_channel, cur_block, norm_cfg=norm_cfg, downsample_first=False) self.up2 = F.interpolate self.upsample_cfg = upsample_cfg def forward(self, x): """Forward function.""" up1 = self.up1(x) low1 = self.low1(x) low2 = self.low2(low1) low3 = self.low3(low2) # Fixing `scale factor` (e.g. 2) is common for upsampling, but # in some cases the spatial size is mismatched and error will arise. if 'scale_factor' in self.upsample_cfg: up2 = self.up2(low3, **self.upsample_cfg) else: shape = up1.shape[2:] up2 = self.up2(low3, size=shape, **self.upsample_cfg) return up1 + up2 @BACKBONES.register_module() class HourglassNet(BaseModule): """HourglassNet backbone. Stacked Hourglass Networks for Human Pose Estimation. More details can be found in the `paper `_ . Args: downsample_times (int): Downsample times in a HourglassModule. num_stacks (int): Number of HourglassModule modules stacked, 1 for Hourglass-52, 2 for Hourglass-104. stage_channels (list[int]): Feature channel of each sub-module in a HourglassModule. stage_blocks (list[int]): Number of sub-modules stacked in a HourglassModule. feat_channel (int): Feature channel of conv after a HourglassModule. norm_cfg (dict): Dictionary to construct and config norm layer. pretrained (str, optional): model pretrained path. Default: None init_cfg (dict or list[dict], optional): Initialization config dict. Default: None Example: >>> from mmdet.models import HourglassNet >>> import torch >>> self = HourglassNet() >>> self.eval() >>> inputs = torch.rand(1, 3, 511, 511) >>> level_outputs = self.forward(inputs) >>> for level_output in level_outputs: ... 
print(tuple(level_output.shape)) (1, 256, 128, 128) (1, 256, 128, 128) """ def __init__(self, downsample_times=5, num_stacks=2, stage_channels=(256, 256, 384, 384, 384, 512), stage_blocks=(2, 2, 2, 2, 2, 4), feat_channel=256, norm_cfg=dict(type='BN', requires_grad=True), pretrained=None, init_cfg=None): assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' super(HourglassNet, self).__init__(init_cfg) self.num_stacks = num_stacks assert self.num_stacks >= 1 assert len(stage_channels) == len(stage_blocks) assert len(stage_channels) > downsample_times cur_channel = stage_channels[0] self.stem = nn.Sequential( ConvModule( 3, cur_channel // 2, 7, padding=3, stride=2, norm_cfg=norm_cfg), ResLayer( BasicBlock, cur_channel // 2, cur_channel, 1, stride=2, norm_cfg=norm_cfg)) self.hourglass_modules = nn.ModuleList([ HourglassModule(downsample_times, stage_channels, stage_blocks) for _ in range(num_stacks) ]) self.inters = ResLayer( BasicBlock, cur_channel, cur_channel, num_stacks - 1, norm_cfg=norm_cfg) self.conv1x1s = nn.ModuleList([ ConvModule( cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) for _ in range(num_stacks - 1) ]) self.out_convs = nn.ModuleList([ ConvModule( cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg) for _ in range(num_stacks) ]) self.remap_convs = nn.ModuleList([ ConvModule( feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) for _ in range(num_stacks - 1) ]) self.relu = nn.ReLU(inplace=True) def init_weights(self): """Init module weights.""" # Training Centripetal Model needs to reset parameters for Conv2d super(HourglassNet, self).init_weights() for m in self.modules(): if isinstance(m, nn.Conv2d): m.reset_parameters() def forward(self, x): """Forward function.""" inter_feat = self.stem(x) out_feats = [] for ind in range(self.num_stacks): single_hourglass = self.hourglass_modules[ind] out_conv = self.out_convs[ind] hourglass_feat = single_hourglass(inter_feat) out_feat = out_conv(hourglass_feat) out_feats.append(out_feat) if ind < self.num_stacks - 1: inter_feat = self.conv1x1s[ind]( inter_feat) + self.remap_convs[ind]( out_feat) inter_feat = self.inters[ind](self.relu(inter_feat)) return out_feats ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/hrnet.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings import torch.nn as nn from mmcv.cnn import build_conv_layer, build_norm_layer from mmcv.runner import BaseModule, ModuleList, Sequential from torch.nn.modules.batchnorm import _BatchNorm from ..builder import BACKBONES from .resnet import BasicBlock, Bottleneck class HRModule(BaseModule): """High-Resolution Module for HRNet. In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange is in this module. 
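For example (an illustrative note derived from the code below), with two branches holding 32- and 64-channel maps, the fuse layer feeding branch 0 projects the 64-channel map to 32 channels with a 1x1 conv and upsamples it by 2, while the fuse layer feeding branch 1 downsamples the 32-channel map with a stride-2 3x3 conv; the per-branch sums are then passed through ReLU.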
""" def __init__(self, num_branches, blocks, num_blocks, in_channels, num_channels, multiscale_output=True, with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), block_init_cfg=None, init_cfg=None): super(HRModule, self).__init__(init_cfg) self.block_init_cfg = block_init_cfg self._check_branches(num_branches, num_blocks, in_channels, num_channels) self.in_channels = in_channels self.num_branches = num_branches self.multiscale_output = multiscale_output self.norm_cfg = norm_cfg self.conv_cfg = conv_cfg self.with_cp = with_cp self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels) self.fuse_layers = self._make_fuse_layers() self.relu = nn.ReLU(inplace=False) def _check_branches(self, num_branches, num_blocks, in_channels, num_channels): if num_branches != len(num_blocks): error_msg = f'NUM_BRANCHES({num_branches}) ' \ f'!= NUM_BLOCKS({len(num_blocks)})' raise ValueError(error_msg) if num_branches != len(num_channels): error_msg = f'NUM_BRANCHES({num_branches}) ' \ f'!= NUM_CHANNELS({len(num_channels)})' raise ValueError(error_msg) if num_branches != len(in_channels): error_msg = f'NUM_BRANCHES({num_branches}) ' \ f'!= NUM_INCHANNELS({len(in_channels)})' raise ValueError(error_msg) def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1): downsample = None if stride != 1 or \ self.in_channels[branch_index] != \ num_channels[branch_index] * block.expansion: downsample = nn.Sequential( build_conv_layer( self.conv_cfg, self.in_channels[branch_index], num_channels[branch_index] * block.expansion, kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, num_channels[branch_index] * block.expansion)[1]) layers = [] layers.append( block( self.in_channels[branch_index], num_channels[branch_index], stride, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, init_cfg=self.block_init_cfg)) self.in_channels[branch_index] = \ num_channels[branch_index] * block.expansion for i in range(1, num_blocks[branch_index]): layers.append( block( self.in_channels[branch_index], num_channels[branch_index], with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, init_cfg=self.block_init_cfg)) return Sequential(*layers) def _make_branches(self, num_branches, block, num_blocks, num_channels): branches = [] for i in range(num_branches): branches.append( self._make_one_branch(i, block, num_blocks, num_channels)) return ModuleList(branches) def _make_fuse_layers(self): if self.num_branches == 1: return None num_branches = self.num_branches in_channels = self.in_channels fuse_layers = [] num_out_branches = num_branches if self.multiscale_output else 1 for i in range(num_out_branches): fuse_layer = [] for j in range(num_branches): if j > i: fuse_layer.append( nn.Sequential( build_conv_layer( self.conv_cfg, in_channels[j], in_channels[i], kernel_size=1, stride=1, padding=0, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1], nn.Upsample( scale_factor=2**(j - i), mode='nearest'))) elif j == i: fuse_layer.append(None) else: conv_downsamples = [] for k in range(i - j): if k == i - j - 1: conv_downsamples.append( nn.Sequential( build_conv_layer( self.conv_cfg, in_channels[j], in_channels[i], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1])) else: conv_downsamples.append( nn.Sequential( build_conv_layer( self.conv_cfg, in_channels[j], in_channels[j], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, 
in_channels[j])[1], nn.ReLU(inplace=False))) fuse_layer.append(nn.Sequential(*conv_downsamples)) fuse_layers.append(nn.ModuleList(fuse_layer)) return nn.ModuleList(fuse_layers) def forward(self, x): """Forward function.""" if self.num_branches == 1: return [self.branches[0](x[0])] for i in range(self.num_branches): x[i] = self.branches[i](x[i]) x_fuse = [] for i in range(len(self.fuse_layers)): y = 0 for j in range(self.num_branches): if i == j: y += x[j] else: y += self.fuse_layers[i][j](x[j]) x_fuse.append(self.relu(y)) return x_fuse @BACKBONES.register_module() class HRNet(BaseModule): """HRNet backbone. `High-Resolution Representations for Labeling Pixels and Regions arXiv: `_. Args: extra (dict): Detailed configuration for each stage of HRNet. There must be 4 stages, the configuration for each stage must have 5 keys: - num_modules(int): The number of HRModule in this stage. - num_branches(int): The number of branches in the HRModule. - block(str): The type of convolution block. - num_blocks(tuple): The number of blocks in each branch. The length must be equal to num_branches. - num_channels(tuple): The number of channels in each branch. The length must be equal to num_branches. in_channels (int): Number of input image channels. Default: 3. conv_cfg (dict): Dictionary to construct and config conv layer. norm_cfg (dict): Dictionary to construct and config norm layer. norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. Default: True. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. zero_init_residual (bool): Whether to use zero init for last norm layer in resblocks to let them behave as identity. Default: False. multiscale_output (bool): Whether to output multi-level features produced by multiple branches. If False, only the first level feature will be output. Default: True. pretrained (str, optional): Model pretrained path. Default: None. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None. Example: >>> from mmdet.models import HRNet >>> import torch >>> extra = dict( >>> stage1=dict( >>> num_modules=1, >>> num_branches=1, >>> block='BOTTLENECK', >>> num_blocks=(4, ), >>> num_channels=(64, )), >>> stage2=dict( >>> num_modules=1, >>> num_branches=2, >>> block='BASIC', >>> num_blocks=(4, 4), >>> num_channels=(32, 64)), >>> stage3=dict( >>> num_modules=4, >>> num_branches=3, >>> block='BASIC', >>> num_blocks=(4, 4, 4), >>> num_channels=(32, 64, 128)), >>> stage4=dict( >>> num_modules=3, >>> num_branches=4, >>> block='BASIC', >>> num_blocks=(4, 4, 4, 4), >>> num_channels=(32, 64, 128, 256))) >>> self = HRNet(extra, in_channels=1) >>> self.eval() >>> inputs = torch.rand(1, 1, 32, 32) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... 
print(tuple(level_out.shape)) (1, 32, 8, 8) (1, 64, 4, 4) (1, 128, 2, 2) (1, 256, 1, 1) """ blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} def __init__(self, extra, in_channels=3, conv_cfg=None, norm_cfg=dict(type='BN'), norm_eval=True, with_cp=False, zero_init_residual=False, multiscale_output=True, pretrained=None, init_cfg=None): super(HRNet, self).__init__(init_cfg) self.pretrained = pretrained assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be specified at the same time' if isinstance(pretrained, str): warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) elif pretrained is None: if init_cfg is None: self.init_cfg = [ dict(type='Kaiming', layer='Conv2d'), dict( type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) ] else: raise TypeError('pretrained must be a str or None') # Assert configurations of 4 stages are in extra assert 'stage1' in extra and 'stage2' in extra \ and 'stage3' in extra and 'stage4' in extra # Assert whether the length of `num_blocks` and `num_channels` are # equal to `num_branches` for i in range(4): cfg = extra[f'stage{i + 1}'] assert len(cfg['num_blocks']) == cfg['num_branches'] and \ len(cfg['num_channels']) == cfg['num_branches'] self.extra = extra self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.norm_eval = norm_eval self.with_cp = with_cp self.zero_init_residual = zero_init_residual # stem net self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2) self.conv1 = build_conv_layer( self.conv_cfg, in_channels, 64, kernel_size=3, stride=2, padding=1, bias=False) self.add_module(self.norm1_name, norm1) self.conv2 = build_conv_layer( self.conv_cfg, 64, 64, kernel_size=3, stride=2, padding=1, bias=False) self.add_module(self.norm2_name, norm2) self.relu = nn.ReLU(inplace=True) # stage 1 self.stage1_cfg = self.extra['stage1'] num_channels = self.stage1_cfg['num_channels'][0] block_type = self.stage1_cfg['block'] num_blocks = self.stage1_cfg['num_blocks'][0] block = self.blocks_dict[block_type] stage1_out_channels = num_channels * block.expansion self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) # stage 2 self.stage2_cfg = self.extra['stage2'] num_channels = self.stage2_cfg['num_channels'] block_type = self.stage2_cfg['block'] block = self.blocks_dict[block_type] num_channels = [channel * block.expansion for channel in num_channels] self.transition1 = self._make_transition_layer([stage1_out_channels], num_channels) self.stage2, pre_stage_channels = self._make_stage( self.stage2_cfg, num_channels) # stage 3 self.stage3_cfg = self.extra['stage3'] num_channels = self.stage3_cfg['num_channels'] block_type = self.stage3_cfg['block'] block = self.blocks_dict[block_type] num_channels = [channel * block.expansion for channel in num_channels] self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels) self.stage3, pre_stage_channels = self._make_stage( self.stage3_cfg, num_channels) # stage 4 self.stage4_cfg = self.extra['stage4'] num_channels = self.stage4_cfg['num_channels'] block_type = self.stage4_cfg['block'] block = self.blocks_dict[block_type] num_channels = [channel * block.expansion for channel in num_channels] self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) self.stage4, pre_stage_channels = self._make_stage( self.stage4_cfg, num_channels, 
multiscale_output=multiscale_output) @property def norm1(self): """nn.Module: the normalization layer named "norm1" """ return getattr(self, self.norm1_name) @property def norm2(self): """nn.Module: the normalization layer named "norm2" """ return getattr(self, self.norm2_name) def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer): num_branches_cur = len(num_channels_cur_layer) num_branches_pre = len(num_channels_pre_layer) transition_layers = [] for i in range(num_branches_cur): if i < num_branches_pre: if num_channels_cur_layer[i] != num_channels_pre_layer[i]: transition_layers.append( nn.Sequential( build_conv_layer( self.conv_cfg, num_channels_pre_layer[i], num_channels_cur_layer[i], kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, num_channels_cur_layer[i])[1], nn.ReLU(inplace=True))) else: transition_layers.append(None) else: conv_downsamples = [] for j in range(i + 1 - num_branches_pre): in_channels = num_channels_pre_layer[-1] out_channels = num_channels_cur_layer[i] \ if j == i - num_branches_pre else in_channels conv_downsamples.append( nn.Sequential( build_conv_layer( self.conv_cfg, in_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, out_channels)[1], nn.ReLU(inplace=True))) transition_layers.append(nn.Sequential(*conv_downsamples)) return nn.ModuleList(transition_layers) def _make_layer(self, block, inplanes, planes, blocks, stride=1): downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = nn.Sequential( build_conv_layer( self.conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, planes * block.expansion)[1]) layers = [] block_init_cfg = None if self.pretrained is None and not hasattr( self, 'init_cfg') and self.zero_init_residual: if block is BasicBlock: block_init_cfg = dict( type='Constant', val=0, override=dict(name='norm2')) elif block is Bottleneck: block_init_cfg = dict( type='Constant', val=0, override=dict(name='norm3')) layers.append( block( inplanes, planes, stride, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, init_cfg=block_init_cfg, )) inplanes = planes * block.expansion for i in range(1, blocks): layers.append( block( inplanes, planes, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, init_cfg=block_init_cfg)) return Sequential(*layers) def _make_stage(self, layer_config, in_channels, multiscale_output=True): num_modules = layer_config['num_modules'] num_branches = layer_config['num_branches'] num_blocks = layer_config['num_blocks'] num_channels = layer_config['num_channels'] block = self.blocks_dict[layer_config['block']] hr_modules = [] block_init_cfg = None if self.pretrained is None and not hasattr( self, 'init_cfg') and self.zero_init_residual: if block is BasicBlock: block_init_cfg = dict( type='Constant', val=0, override=dict(name='norm2')) elif block is Bottleneck: block_init_cfg = dict( type='Constant', val=0, override=dict(name='norm3')) for i in range(num_modules): # multi_scale_output is only used for the last module if not multiscale_output and i == num_modules - 1: reset_multiscale_output = False else: reset_multiscale_output = True hr_modules.append( HRModule( num_branches, block, num_blocks, in_channels, num_channels, reset_multiscale_output, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, block_init_cfg=block_init_cfg)) return 
Sequential(*hr_modules), in_channels def forward(self, x): """Forward function.""" x = self.conv1(x) x = self.norm1(x) x = self.relu(x) x = self.conv2(x) x = self.norm2(x) x = self.relu(x) x = self.layer1(x) x_list = [] for i in range(self.stage2_cfg['num_branches']): if self.transition1[i] is not None: x_list.append(self.transition1[i](x)) else: x_list.append(x) y_list = self.stage2(x_list) x_list = [] for i in range(self.stage3_cfg['num_branches']): if self.transition2[i] is not None: x_list.append(self.transition2[i](y_list[-1])) else: x_list.append(y_list[i]) y_list = self.stage3(x_list) x_list = [] for i in range(self.stage4_cfg['num_branches']): if self.transition3[i] is not None: x_list.append(self.transition3[i](y_list[-1])) else: x_list.append(y_list[i]) y_list = self.stage4(x_list) return y_list def train(self, mode=True): """Convert the model into training mode while keeping the normalization layers frozen.""" super(HRNet, self).train(mode) if mode and self.norm_eval: for m in self.modules(): # trick: eval has an effect on BatchNorm only if isinstance(m, _BatchNorm): m.eval() ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/mobilenet_v2.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from torch.nn.modules.batchnorm import _BatchNorm from ..builder import BACKBONES from ..utils import InvertedResidual, make_divisible @BACKBONES.register_module() class MobileNetV2(BaseModule): """MobileNetV2 backbone. Args: widen_factor (float): Width multiplier, multiply number of channels in each layer by this amount. Default: 1.0. out_indices (Sequence[int], optional): Output from which stages. Default: (1, 2, 4, 7). frozen_stages (int): Stages to be frozen (all param fixed). Default: -1, which means not freezing any parameters. conv_cfg (dict, optional): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU6'). norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. Default: False. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. pretrained (str, optional): model pretrained path. Default: None init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ # Parameters to build layers. 4 parameters are needed to construct a # layer, from left to right: expand_ratio, channel, num_blocks, stride.
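# For example, the row [6, 24, 2, 2] builds a stage of two InvertedResidual
# blocks with expand_ratio=6 and 24 output channels (scaled by widen_factor
# and rounded to a multiple of 8 via make_divisible); only the first block
# in the stage uses the stride of 2.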
arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]] def __init__(self, widen_factor=1., out_indices=(1, 2, 4, 7), frozen_stages=-1, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU6'), norm_eval=False, with_cp=False, pretrained=None, init_cfg=None): super(MobileNetV2, self).__init__(init_cfg) self.pretrained = pretrained assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be specified at the same time' if isinstance(pretrained, str): warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) elif pretrained is None: if init_cfg is None: self.init_cfg = [ dict(type='Kaiming', layer='Conv2d'), dict( type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) ] else: raise TypeError('pretrained must be a str or None') self.widen_factor = widen_factor self.out_indices = out_indices if not set(out_indices).issubset(set(range(0, 8))): raise ValueError('out_indices must be a subset of range' f'(0, 8). But received {out_indices}') if frozen_stages not in range(-1, 8): raise ValueError('frozen_stages must be in range(-1, 8). ' f'But received {frozen_stages}') self.out_indices = out_indices self.frozen_stages = frozen_stages self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.norm_eval = norm_eval self.with_cp = with_cp self.in_channels = make_divisible(32 * widen_factor, 8) self.conv1 = ConvModule( in_channels=3, out_channels=self.in_channels, kernel_size=3, stride=2, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg) self.layers = [] for i, layer_cfg in enumerate(self.arch_settings): expand_ratio, channel, num_blocks, stride = layer_cfg out_channels = make_divisible(channel * widen_factor, 8) inverted_res_layer = self.make_layer( out_channels=out_channels, num_blocks=num_blocks, stride=stride, expand_ratio=expand_ratio) layer_name = f'layer{i + 1}' self.add_module(layer_name, inverted_res_layer) self.layers.append(layer_name) if widen_factor > 1.0: self.out_channel = int(1280 * widen_factor) else: self.out_channel = 1280 layer = ConvModule( in_channels=self.in_channels, out_channels=self.out_channel, kernel_size=1, stride=1, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg) self.add_module('conv2', layer) self.layers.append('conv2') def make_layer(self, out_channels, num_blocks, stride, expand_ratio): """Stack InvertedResidual blocks to build a layer for MobileNetV2. Args: out_channels (int): out_channels of block. num_blocks (int): number of blocks. stride (int): stride of the first block. Default: 1 expand_ratio (int): Expand the number of channels of the hidden layer in InvertedResidual by this ratio. Default: 6. 
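Returns: nn.Sequential: The stacked InvertedResidual blocks.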
""" layers = [] for i in range(num_blocks): if i >= 1: stride = 1 layers.append( InvertedResidual( self.in_channels, out_channels, mid_channels=int(round(self.in_channels * expand_ratio)), stride=stride, with_expand_conv=expand_ratio != 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, with_cp=self.with_cp)) self.in_channels = out_channels return nn.Sequential(*layers) def _freeze_stages(self): if self.frozen_stages >= 0: for param in self.conv1.parameters(): param.requires_grad = False for i in range(1, self.frozen_stages + 1): layer = getattr(self, f'layer{i}') layer.eval() for param in layer.parameters(): param.requires_grad = False def forward(self, x): """Forward function.""" x = self.conv1(x) outs = [] for i, layer_name in enumerate(self.layers): layer = getattr(self, layer_name) x = layer(x) if i in self.out_indices: outs.append(x) return tuple(outs) def train(self, mode=True): """Convert the model into training mode while keep normalization layer frozen.""" super(MobileNetV2, self).train(mode) self._freeze_stages() if mode and self.norm_eval: for m in self.modules(): # trick: eval have effect on BatchNorm only if isinstance(m, _BatchNorm): m.eval() ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/pvt.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import math import warnings import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import (Conv2d, build_activation_layer, build_norm_layer, constant_init, normal_init, trunc_normal_init) from mmcv.cnn.bricks.drop import build_dropout from mmcv.cnn.bricks.transformer import MultiheadAttention from mmcv.cnn.utils.weight_init import trunc_normal_ from mmcv.runner import (BaseModule, ModuleList, Sequential, _load_checkpoint, load_state_dict) from torch.nn.modules.utils import _pair as to_2tuple from ...utils import get_root_logger from ..builder import BACKBONES from ..utils import PatchEmbed, nchw_to_nlc, nlc_to_nchw, pvt_convert class MixFFN(BaseModule): """An implementation of MixFFN of PVT. The differences between MixFFN & FFN: 1. Use 1X1 Conv to replace Linear layer. 2. Introduce 3X3 Depth-wise Conv to encode positional information. Args: embed_dims (int): The feature dimension. Same as `MultiheadAttention`. feedforward_channels (int): The hidden dimension of FFNs. act_cfg (dict, optional): The activation config for FFNs. Default: dict(type='GELU'). ffn_drop (float, optional): Probability of an element to be zeroed in FFN. Default 0.0. dropout_layer (obj:`ConfigDict`): The dropout_layer used when adding the shortcut. Default: None. use_conv (bool): If True, add 3x3 DWConv between two Linear layers. Defaults: False. init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. Default: None. 
""" def __init__(self, embed_dims, feedforward_channels, act_cfg=dict(type='GELU'), ffn_drop=0., dropout_layer=None, use_conv=False, init_cfg=None): super(MixFFN, self).__init__(init_cfg=init_cfg) self.embed_dims = embed_dims self.feedforward_channels = feedforward_channels self.act_cfg = act_cfg activate = build_activation_layer(act_cfg) in_channels = embed_dims fc1 = Conv2d( in_channels=in_channels, out_channels=feedforward_channels, kernel_size=1, stride=1, bias=True) if use_conv: # 3x3 depth wise conv to provide positional encode information dw_conv = Conv2d( in_channels=feedforward_channels, out_channels=feedforward_channels, kernel_size=3, stride=1, padding=(3 - 1) // 2, bias=True, groups=feedforward_channels) fc2 = Conv2d( in_channels=feedforward_channels, out_channels=in_channels, kernel_size=1, stride=1, bias=True) drop = nn.Dropout(ffn_drop) layers = [fc1, activate, drop, fc2, drop] if use_conv: layers.insert(1, dw_conv) self.layers = Sequential(*layers) self.dropout_layer = build_dropout( dropout_layer) if dropout_layer else torch.nn.Identity() def forward(self, x, hw_shape, identity=None): out = nlc_to_nchw(x, hw_shape) out = self.layers(out) out = nchw_to_nlc(out) if identity is None: identity = x return identity + self.dropout_layer(out) class SpatialReductionAttention(MultiheadAttention): """An implementation of Spatial Reduction Attention of PVT. This module is modified from MultiheadAttention which is a module from mmcv.cnn.bricks.transformer. Args: embed_dims (int): The embedding dimension. num_heads (int): Parallel attention heads. attn_drop (float): A Dropout layer on attn_output_weights. Default: 0.0. proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. Default: 0.0. dropout_layer (obj:`ConfigDict`): The dropout_layer used when adding the shortcut. Default: None. batch_first (bool): Key, Query and Value are shape of (batch, n, embed_dim) or (n, batch, embed_dim). Default: False. qkv_bias (bool): enable bias for qkv if True. Default: True. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='LN'). sr_ratio (int): The ratio of spatial reduction of Spatial Reduction Attention of PVT. Default: 1. init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. Default: None. """ def __init__(self, embed_dims, num_heads, attn_drop=0., proj_drop=0., dropout_layer=None, batch_first=True, qkv_bias=True, norm_cfg=dict(type='LN'), sr_ratio=1, init_cfg=None): super().__init__( embed_dims, num_heads, attn_drop, proj_drop, batch_first=batch_first, dropout_layer=dropout_layer, bias=qkv_bias, init_cfg=init_cfg) self.sr_ratio = sr_ratio if sr_ratio > 1: self.sr = Conv2d( in_channels=embed_dims, out_channels=embed_dims, kernel_size=sr_ratio, stride=sr_ratio) # The ret[0] of build_norm_layer is norm name. self.norm = build_norm_layer(norm_cfg, embed_dims)[1] # handle the BC-breaking from https://github.com/open-mmlab/mmcv/pull/1418 # noqa from mmdet import digit_version, mmcv_version if mmcv_version < digit_version('1.3.17'): warnings.warn('The legacy version of forward function in' 'SpatialReductionAttention is deprecated in' 'mmcv>=1.3.17 and will no longer support in the' 'future. 
Please upgrade your mmcv.') self.forward = self.legacy_forward def forward(self, x, hw_shape, identity=None): x_q = x if self.sr_ratio > 1: x_kv = nlc_to_nchw(x, hw_shape) x_kv = self.sr(x_kv) x_kv = nchw_to_nlc(x_kv) x_kv = self.norm(x_kv) else: x_kv = x if identity is None: identity = x_q # Because the dataflow('key', 'query', 'value') of # ``torch.nn.MultiheadAttention`` is (num_query, batch, # embed_dims), We should adjust the shape of dataflow from # batch_first (batch, num_query, embed_dims) to num_query_first # (num_query ,batch, embed_dims), and recover ``attn_output`` # from num_query_first to batch_first. if self.batch_first: x_q = x_q.transpose(0, 1) x_kv = x_kv.transpose(0, 1) out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] if self.batch_first: out = out.transpose(0, 1) return identity + self.dropout_layer(self.proj_drop(out)) def legacy_forward(self, x, hw_shape, identity=None): """multi head attention forward in mmcv version < 1.3.17.""" x_q = x if self.sr_ratio > 1: x_kv = nlc_to_nchw(x, hw_shape) x_kv = self.sr(x_kv) x_kv = nchw_to_nlc(x_kv) x_kv = self.norm(x_kv) else: x_kv = x if identity is None: identity = x_q out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] return identity + self.dropout_layer(self.proj_drop(out)) class PVTEncoderLayer(BaseModule): """Implements one encoder layer in PVT. Args: embed_dims (int): The feature dimension. num_heads (int): Parallel attention heads. feedforward_channels (int): The hidden dimension for FFNs. drop_rate (float): Probability of an element to be zeroed. after the feed forward layer. Default: 0.0. attn_drop_rate (float): The drop out rate for attention layer. Default: 0.0. drop_path_rate (float): stochastic depth rate. Default: 0.0. qkv_bias (bool): enable bias for qkv if True. Default: True. act_cfg (dict): The activation config for FFNs. Default: dict(type='GELU'). norm_cfg (dict): Config dict for normalization layer. Default: dict(type='LN'). sr_ratio (int): The ratio of spatial reduction of Spatial Reduction Attention of PVT. Default: 1. use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN. Default: False. init_cfg (dict, optional): Initialization config dict. Default: None. """ def __init__(self, embed_dims, num_heads, feedforward_channels, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., qkv_bias=True, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN'), sr_ratio=1, use_conv_ffn=False, init_cfg=None): super(PVTEncoderLayer, self).__init__(init_cfg=init_cfg) # The ret[0] of build_norm_layer is norm name. self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] self.attn = SpatialReductionAttention( embed_dims=embed_dims, num_heads=num_heads, attn_drop=attn_drop_rate, proj_drop=drop_rate, dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), qkv_bias=qkv_bias, norm_cfg=norm_cfg, sr_ratio=sr_ratio) # The ret[0] of build_norm_layer is norm name. self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] self.ffn = MixFFN( embed_dims=embed_dims, feedforward_channels=feedforward_channels, ffn_drop=drop_rate, dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), use_conv=use_conv_ffn, act_cfg=act_cfg) def forward(self, x, hw_shape): x = self.attn(self.norm1(x), hw_shape, identity=x) x = self.ffn(self.norm2(x), hw_shape, identity=x) return x class AbsolutePositionEmbedding(BaseModule): """An implementation of the absolute position embedding in PVT. Args: pos_shape (int): The shape of the absolute position embedding. pos_dim (int): The dimension of the absolute position embedding. 
drop_rate (float): Probability of an element to be zeroed. Default: 0.0. """ def __init__(self, pos_shape, pos_dim, drop_rate=0., init_cfg=None): super().__init__(init_cfg=init_cfg) if isinstance(pos_shape, int): pos_shape = to_2tuple(pos_shape) elif isinstance(pos_shape, tuple): if len(pos_shape) == 1: pos_shape = to_2tuple(pos_shape[0]) assert len(pos_shape) == 2, \ f'The size of image should have length 1 or 2, ' \ f'but got {len(pos_shape)}' self.pos_shape = pos_shape self.pos_dim = pos_dim self.pos_embed = nn.Parameter( torch.zeros(1, pos_shape[0] * pos_shape[1], pos_dim)) self.drop = nn.Dropout(p=drop_rate) def init_weights(self): trunc_normal_(self.pos_embed, std=0.02) def resize_pos_embed(self, pos_embed, input_shape, mode='bilinear'): """Resize pos_embed weights. Resize pos_embed using bilinear interpolate method. Args: pos_embed (torch.Tensor): Position embedding weights. input_shape (tuple): Tuple for (downsampled input image height, downsampled input image width). mode (str): Algorithm used for upsampling: ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` | ``'trilinear'``. Default: ``'bilinear'``. Return: torch.Tensor: The resized pos_embed of shape [B, L_new, C]. """ assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]' pos_h, pos_w = self.pos_shape pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):] pos_embed_weight = pos_embed_weight.reshape( 1, pos_h, pos_w, self.pos_dim).permute(0, 3, 1, 2).contiguous() pos_embed_weight = F.interpolate( pos_embed_weight, size=input_shape, mode=mode) pos_embed_weight = torch.flatten(pos_embed_weight, 2).transpose(1, 2).contiguous() pos_embed = pos_embed_weight return pos_embed def forward(self, x, hw_shape, mode='bilinear'): pos_embed = self.resize_pos_embed(self.pos_embed, hw_shape, mode) return self.drop(x + pos_embed) @BACKBONES.register_module() class PyramidVisionTransformer(BaseModule): """Pyramid Vision Transformer (PVT) Implementation of `Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions `_. Args: pretrain_img_size (int | tuple[int]): The size of input image when pretrain. Defaults: 224. in_channels (int): Number of input channels. Default: 3. embed_dims (int): Embedding dimension. Default: 64. num_stags (int): The num of stages. Default: 4. num_layers (Sequence[int]): The layer number of each transformer encode layer. Default: [3, 4, 6, 3]. num_heads (Sequence[int]): The attention heads of each transformer encode layer. Default: [1, 2, 5, 8]. patch_sizes (Sequence[int]): The patch_size of each patch embedding. Default: [4, 2, 2, 2]. strides (Sequence[int]): The stride of each patch embedding. Default: [4, 2, 2, 2]. paddings (Sequence[int]): The padding of each patch embedding. Default: [0, 0, 0, 0]. sr_ratios (Sequence[int]): The spatial reduction rate of each transformer encode layer. Default: [8, 4, 2, 1]. out_indices (Sequence[int] | int): Output from which stages. Default: (0, 1, 2, 3). mlp_ratios (Sequence[int]): The ratio of the mlp hidden dim to the embedding dim of each transformer encode layer. Default: [8, 8, 4, 4]. qkv_bias (bool): Enable bias for qkv if True. Default: True. drop_rate (float): Probability of an element to be zeroed. Default 0.0. attn_drop_rate (float): The drop out rate for attention layer. Default 0.0. drop_path_rate (float): stochastic depth rate. Default 0.1. use_abs_pos_embed (bool): If True, add absolute position embedding to the patch embedding. Defaults: True. use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN. 
Default: False. act_cfg (dict): The activation config for FFNs. Default: dict(type='GELU'). norm_cfg (dict): Config dict for normalization layer. Default: dict(type='LN'). pretrained (str, optional): model pretrained path. Default: None. convert_weights (bool): The flag indicates whether the pre-trained model is from the original repo. We may need to convert some keys to make it compatible. Default: True. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None. """ def __init__(self, pretrain_img_size=224, in_channels=3, embed_dims=64, num_stages=4, num_layers=[3, 4, 6, 3], num_heads=[1, 2, 5, 8], patch_sizes=[4, 2, 2, 2], strides=[4, 2, 2, 2], paddings=[0, 0, 0, 0], sr_ratios=[8, 4, 2, 1], out_indices=(0, 1, 2, 3), mlp_ratios=[8, 8, 4, 4], qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, use_abs_pos_embed=True, norm_after_stage=False, use_conv_ffn=False, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN', eps=1e-6), pretrained=None, convert_weights=True, init_cfg=None): super().__init__(init_cfg=init_cfg) self.convert_weights = convert_weights if isinstance(pretrain_img_size, int): pretrain_img_size = to_2tuple(pretrain_img_size) elif isinstance(pretrain_img_size, tuple): if len(pretrain_img_size) == 1: pretrain_img_size = to_2tuple(pretrain_img_size[0]) assert len(pretrain_img_size) == 2, \ f'The size of image should have length 1 or 2, ' \ f'but got {len(pretrain_img_size)}' assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be setting at the same time' if isinstance(pretrained, str): warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) elif pretrained is None: self.init_cfg = init_cfg else: raise TypeError('pretrained must be a str or None') self.embed_dims = embed_dims self.num_stages = num_stages self.num_layers = num_layers self.num_heads = num_heads self.patch_sizes = patch_sizes self.strides = strides self.sr_ratios = sr_ratios assert num_stages == len(num_layers) == len(num_heads) \ == len(patch_sizes) == len(strides) == len(sr_ratios) self.out_indices = out_indices assert max(out_indices) < self.num_stages self.pretrained = pretrained # transformer encoder dpr = [ x.item() for x in torch.linspace(0, drop_path_rate, sum(num_layers)) ] # stochastic num_layer decay rule cur = 0 self.layers = ModuleList() for i, num_layer in enumerate(num_layers): embed_dims_i = embed_dims * num_heads[i] patch_embed = PatchEmbed( in_channels=in_channels, embed_dims=embed_dims_i, kernel_size=patch_sizes[i], stride=strides[i], padding=paddings[i], bias=True, norm_cfg=norm_cfg) layers = ModuleList() if use_abs_pos_embed: pos_shape = pretrain_img_size // np.prod(patch_sizes[:i + 1]) pos_embed = AbsolutePositionEmbedding( pos_shape=pos_shape, pos_dim=embed_dims_i, drop_rate=drop_rate) layers.append(pos_embed) layers.extend([ PVTEncoderLayer( embed_dims=embed_dims_i, num_heads=num_heads[i], feedforward_channels=mlp_ratios[i] * embed_dims_i, drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=dpr[cur + idx], qkv_bias=qkv_bias, act_cfg=act_cfg, norm_cfg=norm_cfg, sr_ratio=sr_ratios[i], use_conv_ffn=use_conv_ffn) for idx in range(num_layer) ]) in_channels = embed_dims_i # The ret[0] of build_norm_layer is norm name. 
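# norm_after_stage is False for the original PVT and True for PVTv2 (see
# PyramidVisionTransformerV2 below), so in v2 each stage output is
# layer-normalized before forward() reshapes it back to NCHW.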
if norm_after_stage: norm = build_norm_layer(norm_cfg, embed_dims_i)[1] else: norm = nn.Identity() self.layers.append(ModuleList([patch_embed, layers, norm])) cur += num_layer def init_weights(self): logger = get_root_logger() if self.init_cfg is None: logger.warn(f'No pre-trained weights for ' f'{self.__class__.__name__}, ' f'training start from scratch') for m in self.modules(): if isinstance(m, nn.Linear): trunc_normal_init(m, std=.02, bias=0.) elif isinstance(m, nn.LayerNorm): constant_init(m, 1.0) elif isinstance(m, nn.Conv2d): fan_out = m.kernel_size[0] * m.kernel_size[ 1] * m.out_channels fan_out //= m.groups normal_init(m, 0, math.sqrt(2.0 / fan_out)) elif isinstance(m, AbsolutePositionEmbedding): m.init_weights() else: assert 'checkpoint' in self.init_cfg, f'Only support ' \ f'specify `Pretrained` in ' \ f'`init_cfg` in ' \ f'{self.__class__.__name__} ' checkpoint = _load_checkpoint( self.init_cfg.checkpoint, logger=logger, map_location='cpu') logger.warn(f'Load pre-trained model for ' f'{self.__class__.__name__} from original repo') if 'state_dict' in checkpoint: state_dict = checkpoint['state_dict'] elif 'model' in checkpoint: state_dict = checkpoint['model'] else: state_dict = checkpoint if self.convert_weights: # Because pvt backbones are not supported by mmcls, # so we need to convert pre-trained weights to match this # implementation. state_dict = pvt_convert(state_dict) load_state_dict(self, state_dict, strict=False, logger=logger) def forward(self, x): outs = [] for i, layer in enumerate(self.layers): x, hw_shape = layer[0](x) for block in layer[1]: x = block(x, hw_shape) x = layer[2](x) x = nlc_to_nchw(x, hw_shape) if i in self.out_indices: outs.append(x) return outs @BACKBONES.register_module() class PyramidVisionTransformerV2(PyramidVisionTransformer): """Implementation of `PVTv2: Improved Baselines with Pyramid Vision Transformer `_.""" def __init__(self, **kwargs): super(PyramidVisionTransformerV2, self).__init__( patch_sizes=[7, 3, 3, 3], paddings=[3, 1, 1, 1], use_abs_pos_embed=False, norm_after_stage=True, use_conv_ffn=True, **kwargs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/regnet.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings import numpy as np import torch.nn as nn from mmcv.cnn import build_conv_layer, build_norm_layer from ..builder import BACKBONES from .resnet import ResNet from .resnext import Bottleneck @BACKBONES.register_module() class RegNet(ResNet): """RegNet backbone. More details can be found in `paper `_ . Args: arch (dict): The parameter of RegNets. - w0 (int): initial width - wa (float): slope of width - wm (float): quantization parameter to quantize the width - depth (int): depth of the backbone - group_w (int): width of group - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck. strides (Sequence[int]): Strides of the first block of each stage. base_channels (int): Base channels after stem layer. in_channels (int): Number of input image channels. Default: 3. dilations (Sequence[int]): Dilation of each stage. out_indices (Sequence[int]): Output from which stages. style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two layer is the 3x3 conv layer, otherwise the stride-two layer is the first 1x1 conv layer. frozen_stages (int): Stages to be frozen (all param fixed). -1 means not freezing any parameters. norm_cfg (dict): dictionary to construct and config norm layer. 
norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. zero_init_residual (bool): whether to use zero init for last norm layer in resblocks to let them behave as identity. pretrained (str, optional): model pretrained path. Default: None init_cfg (dict or list[dict], optional): Initialization config dict. Default: None Example: >>> from mmdet.models import RegNet >>> import torch >>> self = RegNet( arch=dict( w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0)) >>> self.eval() >>> inputs = torch.rand(1, 3, 32, 32) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... print(tuple(level_out.shape)) (1, 96, 8, 8) (1, 192, 4, 4) (1, 432, 2, 2) (1, 1008, 1, 1) """ arch_settings = { 'regnetx_400mf': dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), 'regnetx_800mf': dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0), 'regnetx_1.6gf': dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0), 'regnetx_3.2gf': dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0), 'regnetx_4.0gf': dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0), 'regnetx_6.4gf': dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0), 'regnetx_8.0gf': dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0), 'regnetx_12gf': dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0), } def __init__(self, arch, in_channels=3, stem_channels=32, base_channels=32, strides=(2, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', deep_stem=False, avg_down=False, frozen_stages=-1, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, dcn=None, stage_with_dcn=(False, False, False, False), plugins=None, with_cp=False, zero_init_residual=True, pretrained=None, init_cfg=None): super(ResNet, self).__init__(init_cfg) # Generate RegNet parameters first if isinstance(arch, str): assert arch in self.arch_settings, \ f'"arch": "{arch}" is not one of the' \ ' arch_settings' arch = self.arch_settings[arch] elif not isinstance(arch, dict): raise ValueError('Expect "arch" to be either a string ' f'or a dict, got {type(arch)}') widths, num_stages = self.generate_regnet( arch['w0'], arch['wa'], arch['wm'], arch['depth'], ) # Convert to per stage format stage_widths, stage_blocks = self.get_stages_from_blocks(widths) # Generate group widths and bot muls group_widths = [arch['group_w'] for _ in range(num_stages)] self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)] # Adjust the compatibility of stage_widths and group_widths stage_widths, group_widths = self.adjust_width_group( stage_widths, self.bottleneck_ratio, group_widths) # Group params by stage self.stage_widths = stage_widths self.group_widths = group_widths self.depth = sum(stage_blocks) self.stem_channels = stem_channels self.base_channels = base_channels self.num_stages = num_stages assert num_stages >= 1 and num_stages <= 4 self.strides = strides self.dilations = dilations assert len(strides) == len(dilations) == num_stages self.out_indices = out_indices assert max(out_indices) < num_stages self.style = style self.deep_stem = deep_stem self.avg_down = avg_down self.frozen_stages = frozen_stages self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.with_cp = with_cp self.norm_eval = 
norm_eval self.dcn = dcn self.stage_with_dcn = stage_with_dcn if dcn is not None: assert len(stage_with_dcn) == num_stages self.plugins = plugins self.zero_init_residual = zero_init_residual self.block = Bottleneck expansion_bak = self.block.expansion self.block.expansion = 1 self.stage_blocks = stage_blocks[:num_stages] self._make_stem_layer(in_channels, stem_channels) block_init_cfg = None assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be specified at the same time' if isinstance(pretrained, str): warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) elif pretrained is None: if init_cfg is None: self.init_cfg = [ dict(type='Kaiming', layer='Conv2d'), dict( type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) ] if self.zero_init_residual: block_init_cfg = dict( type='Constant', val=0, override=dict(name='norm3')) else: raise TypeError('pretrained must be a str or None') self.inplanes = stem_channels self.res_layers = [] for i, num_blocks in enumerate(self.stage_blocks): stride = self.strides[i] dilation = self.dilations[i] group_width = self.group_widths[i] width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i])) stage_groups = width // group_width dcn = self.dcn if self.stage_with_dcn[i] else None if self.plugins is not None: stage_plugins = self.make_stage_plugins(self.plugins, i) else: stage_plugins = None res_layer = self.make_res_layer( block=self.block, inplanes=self.inplanes, planes=self.stage_widths[i], num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, plugins=stage_plugins, groups=stage_groups, base_width=group_width, base_channels=self.stage_widths[i], init_cfg=block_init_cfg) self.inplanes = self.stage_widths[i] layer_name = f'layer{i + 1}' self.add_module(layer_name, res_layer) self.res_layers.append(layer_name) self._freeze_stages() self.feat_dim = stage_widths[-1] self.block.expansion = expansion_bak def _make_stem_layer(self, in_channels, base_channels): self.conv1 = build_conv_layer( self.conv_cfg, in_channels, base_channels, kernel_size=3, stride=2, padding=1, bias=False) self.norm1_name, norm1 = build_norm_layer( self.norm_cfg, base_channels, postfix=1) self.add_module(self.norm1_name, norm1) self.relu = nn.ReLU(inplace=True) def generate_regnet(self, initial_width, width_slope, width_parameter, depth, divisor=8): """Generates per block width from RegNet parameters. Args: initial_width ([int]): Initial width of the backbone width_slope ([float]): Slope of the quantized linear function width_parameter ([int]): Parameter used to quantize the width. depth ([int]): Depth of the backbone. divisor (int, optional): The divisor of channels. Defaults to 8. 
Returns: list, int: return a list of widths of each stage and the number \ of stages """ assert width_slope >= 0 assert initial_width > 0 assert width_parameter > 1 assert initial_width % divisor == 0 widths_cont = np.arange(depth) * width_slope + initial_width ks = np.round( np.log(widths_cont / initial_width) / np.log(width_parameter)) widths = initial_width * np.power(width_parameter, ks) widths = np.round(np.divide(widths, divisor)) * divisor num_stages = len(np.unique(widths)) widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist() return widths, num_stages @staticmethod def quantize_float(number, divisor): """Converts a float to the closest non-zero int divisible by divisor. Args: number (int): Original number to be quantized. divisor (int): Divisor used to quantize the number. Returns: int: Quantized number that is divisible by divisor. """ return int(round(number / divisor) * divisor) def adjust_width_group(self, widths, bottleneck_ratio, groups): """Adjusts the compatibility of widths and groups. Args: widths (list[int]): Width of each stage. bottleneck_ratio (list[float]): Bottleneck ratio of each stage. groups (list[int]): Number of groups in each stage. Returns: tuple(list): The adjusted widths and groups of each stage. """ bottleneck_width = [ int(w * b) for w, b in zip(widths, bottleneck_ratio) ] groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)] bottleneck_width = [ self.quantize_float(w_bot, g) for w_bot, g in zip(bottleneck_width, groups) ] widths = [ int(w_bot / b) for w_bot, b in zip(bottleneck_width, bottleneck_ratio) ] return widths, groups def get_stages_from_blocks(self, widths): """Gets widths/stage_blocks of network at each stage. Args: widths (list[int]): Width in each stage. Returns: tuple(list): width and depth of each stage """ width_diff = [ width != width_prev for width, width_prev in zip(widths + [0], [0] + widths) ] stage_widths = [ width for width, diff in zip(widths, width_diff[:-1]) if diff ] stage_blocks = np.diff([ depth for depth, diff in zip(range(len(width_diff)), width_diff) if diff ]).tolist() return stage_widths, stage_blocks def forward(self, x): """Forward function.""" x = self.conv1(x) x = self.norm1(x) x = self.relu(x) outs = [] for i, layer_name in enumerate(self.res_layers): res_layer = getattr(self, layer_name) x = res_layer(x) if i in self.out_indices: outs.append(x) return tuple(outs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/res2net.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import math import torch import torch.nn as nn import torch.utils.checkpoint as cp from mmcv.cnn import build_conv_layer, build_norm_layer from mmcv.runner import Sequential from ..builder import BACKBONES from .resnet import Bottleneck as _Bottleneck from .resnet import ResNet class Bottle2neck(_Bottleneck): expansion = 4 def __init__(self, inplanes, planes, scales=4, base_width=26, base_channels=64, stage_type='normal', **kwargs): """Bottle2neck block for Res2Net. If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is "caffe", the stride-two layer is the first 1x1 conv layer. """ super(Bottle2neck, self).__init__(inplanes, planes, **kwargs) assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'
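# Illustrative numbers (an added note, not from the original file): with
# planes=64, base_width=26, base_channels=64 and scales=4, width = 26, so
# conv1 expands the input to width * scales = 104 channels, which forward()
# later splits into `scales` chunks of `width` channels each.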
width = int(math.floor(self.planes * (base_width / base_channels))) self.norm1_name, norm1 = build_norm_layer( self.norm_cfg, width * scales, postfix=1) self.norm3_name, norm3 = build_norm_layer( self.norm_cfg, self.planes * self.expansion, postfix=3) self.conv1 = build_conv_layer( self.conv_cfg, self.inplanes, width * scales, kernel_size=1, stride=self.conv1_stride, bias=False) self.add_module(self.norm1_name, norm1) if stage_type == 'stage' and self.conv2_stride != 1: self.pool = nn.AvgPool2d( kernel_size=3, stride=self.conv2_stride, padding=1) convs = [] bns = [] fallback_on_stride = False if self.with_dcn: fallback_on_stride = self.dcn.pop('fallback_on_stride', False) if not self.with_dcn or fallback_on_stride: for i in range(scales - 1): convs.append( build_conv_layer( self.conv_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False)) bns.append( build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1]) self.convs = nn.ModuleList(convs) self.bns = nn.ModuleList(bns) else: assert self.conv_cfg is None, 'conv_cfg must be None for DCN' for i in range(scales - 1): convs.append( build_conv_layer( self.dcn, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False)) bns.append( build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1]) self.convs = nn.ModuleList(convs) self.bns = nn.ModuleList(bns) self.conv3 = build_conv_layer( self.conv_cfg, width * scales, self.planes * self.expansion, kernel_size=1, bias=False) self.add_module(self.norm3_name, norm3) self.stage_type = stage_type self.scales = scales self.width = width delattr(self, 'conv2') delattr(self, self.norm2_name) def forward(self, x): """Forward function.""" def _inner_forward(x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv1_plugin_names) spx = torch.split(out, self.width, 1) sp = self.convs[0](spx[0].contiguous()) sp = self.relu(self.bns[0](sp)) out = sp for i in range(1, self.scales - 1): if self.stage_type == 'stage': sp = spx[i] else: sp = sp + spx[i] sp = self.convs[i](sp.contiguous()) sp = self.relu(self.bns[i](sp)) out = torch.cat((out, sp), 1) if self.stage_type == 'normal' or self.conv2_stride == 1: out = torch.cat((out, spx[self.scales - 1]), 1) elif self.stage_type == 'stage': out = torch.cat((out, self.pool(spx[self.scales - 1])), 1) if self.with_plugins: out = self.forward_plugin(out, self.after_conv2_plugin_names) out = self.conv3(out) out = self.norm3(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv3_plugin_names) if self.downsample is not None: identity = self.downsample(x) out += identity return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu(out) return out class Res2Layer(Sequential): """Res2Layer to build Res2Net style backbone. Args: block (nn.Module): block used to build ResLayer. inplanes (int): inplanes of block. planes (int): planes of block. num_blocks (int): number of blocks. stride (int): stride of the first block. Default: 1 avg_down (bool): Use AvgPool instead of stride conv when downsampling in the bottle2neck. Default: False conv_cfg (dict): dictionary to construct and config conv layer. Default: None norm_cfg (dict): dictionary to construct and config norm layer. Default: dict(type='BN') scales (int): Scales used in Res2Net. Default: 4 base_width (int): Basic width of each scale. 
Default: 26 """ def __init__(self, block, inplanes, planes, num_blocks, stride=1, avg_down=True, conv_cfg=None, norm_cfg=dict(type='BN'), scales=4, base_width=26, **kwargs): self.block = block downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = nn.Sequential( nn.AvgPool2d( kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False), build_conv_layer( conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=1, bias=False), build_norm_layer(norm_cfg, planes * block.expansion)[1], ) layers = [] layers.append( block( inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, scales=scales, base_width=base_width, stage_type='stage', **kwargs)) inplanes = planes * block.expansion for i in range(1, num_blocks): layers.append( block( inplanes=inplanes, planes=planes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, scales=scales, base_width=base_width, **kwargs)) super(Res2Layer, self).__init__(*layers) @BACKBONES.register_module() class Res2Net(ResNet): """Res2Net backbone. Args: scales (int): Scales used in Res2Net. Default: 4 base_width (int): Basic width of each scale. Default: 26 depth (int): Depth of res2net, from {50, 101, 152}. in_channels (int): Number of input image channels. Default: 3. num_stages (int): Res2net stages. Default: 4. strides (Sequence[int]): Strides of the first block of each stage. dilations (Sequence[int]): Dilation of each stage. out_indices (Sequence[int]): Output from which stages. style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two layer is the 3x3 conv layer, otherwise the stride-two layer is the first 1x1 conv layer. deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv avg_down (bool): Use AvgPool instead of stride conv when downsampling in the bottle2neck. frozen_stages (int): Stages to be frozen (stop grad and set eval mode). -1 means not freezing any parameters. norm_cfg (dict): Dictionary to construct and config norm layer. norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. plugins (list[dict]): List of plugins for stages, each dict contains: - cfg (dict, required): Cfg dict to build plugin. - position (str, required): Position inside block to insert plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'. - stages (tuple[bool], optional): Stages to apply plugin, length should be same as 'num_stages'. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. zero_init_residual (bool): Whether to use zero init for last norm layer in resblocks to let them behave as identity. pretrained (str, optional): model pretrained path. Default: None init_cfg (dict or list[dict], optional): Initialization config dict. Default: None Example: >>> from mmdet.models import Res2Net >>> import torch >>> self = Res2Net(depth=50, scales=4, base_width=26) >>> self.eval() >>> inputs = torch.rand(1, 3, 32, 32) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... 
print(tuple(level_out.shape)) (1, 256, 8, 8) (1, 512, 4, 4) (1, 1024, 2, 2) (1, 2048, 1, 1) """ arch_settings = { 50: (Bottle2neck, (3, 4, 6, 3)), 101: (Bottle2neck, (3, 4, 23, 3)), 152: (Bottle2neck, (3, 8, 36, 3)) } def __init__(self, scales=4, base_width=26, style='pytorch', deep_stem=True, avg_down=True, pretrained=None, init_cfg=None, **kwargs): self.scales = scales self.base_width = base_width super(Res2Net, self).__init__( style='pytorch', deep_stem=True, avg_down=True, pretrained=pretrained, init_cfg=init_cfg, **kwargs) def make_res_layer(self, **kwargs): return Res2Layer( scales=self.scales, base_width=self.base_width, base_channels=self.base_channels, **kwargs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/resnest.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import math import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as cp from mmcv.cnn import build_conv_layer, build_norm_layer from mmcv.runner import BaseModule from ..builder import BACKBONES from ..utils import ResLayer from .resnet import Bottleneck as _Bottleneck from .resnet import ResNetV1d class RSoftmax(nn.Module): """Radix Softmax module in ``SplitAttentionConv2d``. Args: radix (int): Radix of input. groups (int): Groups of input. """ def __init__(self, radix, groups): super().__init__() self.radix = radix self.groups = groups def forward(self, x): batch = x.size(0) if self.radix > 1: x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x class SplitAttentionConv2d(BaseModule): """Split-Attention Conv2d in ResNeSt. Args: in_channels (int): Number of channels in the input feature map. channels (int): Number of intermediate channels. kernel_size (int | tuple[int]): Size of the convolution kernel. stride (int | tuple[int]): Stride of the convolution. padding (int | tuple[int]): Zero-padding added to both sides of the input. dilation (int | tuple[int]): Spacing between kernel elements. groups (int): Number of blocked connections from input channels to output channels. Same as nn.Conv2d. radix (int): Radix of SplitAttentionConv2d. Default: 2 reduction_factor (int): Reduction factor of inter_channels. Default: 4. conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). dcn (dict): Config dict for DCN. Default: None. init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None """ def __init__(self, in_channels, channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, radix=2, reduction_factor=4, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, init_cfg=None): super(SplitAttentionConv2d, self).__init__(init_cfg) inter_channels = max(in_channels * radix // reduction_factor, 32) self.radix = radix self.groups = groups self.channels = channels self.with_dcn = dcn is not None self.dcn = dcn fallback_on_stride = False if self.with_dcn: fallback_on_stride = self.dcn.pop('fallback_on_stride', False) if self.with_dcn and not fallback_on_stride: assert conv_cfg is None, 'conv_cfg must be None for DCN' conv_cfg = dcn self.conv = build_conv_layer( conv_cfg, in_channels, channels * radix, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups * radix, bias=False) # To be consistent with original implementation, starting from 0 self.norm0_name, norm0 = build_norm_layer( norm_cfg, channels * radix, postfix=0) self.add_module(self.norm0_name, norm0) self.relu = nn.ReLU(inplace=True) self.fc1 = build_conv_layer( None, channels, inter_channels, 1, groups=self.groups) self.norm1_name, norm1 = build_norm_layer( norm_cfg, inter_channels, postfix=1) self.add_module(self.norm1_name, norm1) self.fc2 = build_conv_layer( None, inter_channels, channels * radix, 1, groups=self.groups) self.rsoftmax = RSoftmax(radix, groups) @property def norm0(self): """nn.Module: the normalization layer named "norm0" """ return getattr(self, self.norm0_name) @property def norm1(self): """nn.Module: the normalization layer named "norm1" """ return getattr(self, self.norm1_name) def forward(self, x): x = self.conv(x) x = self.norm0(x) x = self.relu(x) batch = x.size(0) if self.radix > 1: splits = x.view(batch, self.radix, -1, *x.shape[2:]) gap = splits.sum(dim=1) else: gap = x gap = F.adaptive_avg_pool2d(gap, 1) gap = self.fc1(gap) gap = self.norm1(gap) gap = self.relu(gap) atten = self.fc2(gap) atten = self.rsoftmax(atten).view(batch, -1, 1, 1) if self.radix > 1: attens = atten.view(batch, self.radix, -1, *atten.shape[2:]) out = torch.sum(attens * splits, dim=1) else: out = atten * x return out.contiguous() class Bottleneck(_Bottleneck): """Bottleneck block for ResNeSt. Args: inplanes (int): Input planes of this block. planes (int): Middle planes of this block. groups (int): Groups of conv2. base_width (int): Base of width in terms of base channels. Default: 4. base_channels (int): Base of channels for calculating width. Default: 64. radix (int): Radix of SplitAttentionConv2d. Default: 2 reduction_factor (int): Reduction factor of inter_channels in SplitAttentionConv2d. Default: 4. avg_down_stride (bool): Whether to use average pool for stride in Bottleneck. Default: True. kwargs (dict): Keyword arguments for base class.
""" expansion = 4 def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, radix=2, reduction_factor=4, avg_down_stride=True, **kwargs): """Bottleneck block for ResNeSt.""" super(Bottleneck, self).__init__(inplanes, planes, **kwargs) if groups == 1: width = self.planes else: width = math.floor(self.planes * (base_width / base_channels)) * groups self.avg_down_stride = avg_down_stride and self.conv2_stride > 1 self.norm1_name, norm1 = build_norm_layer( self.norm_cfg, width, postfix=1) self.norm3_name, norm3 = build_norm_layer( self.norm_cfg, self.planes * self.expansion, postfix=3) self.conv1 = build_conv_layer( self.conv_cfg, self.inplanes, width, kernel_size=1, stride=self.conv1_stride, bias=False) self.add_module(self.norm1_name, norm1) self.with_modulated_dcn = False self.conv2 = SplitAttentionConv2d( width, width, kernel_size=3, stride=1 if self.avg_down_stride else self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, radix=radix, reduction_factor=reduction_factor, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=self.dcn) delattr(self, self.norm2_name) if self.avg_down_stride: self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1) self.conv3 = build_conv_layer( self.conv_cfg, width, self.planes * self.expansion, kernel_size=1, bias=False) self.add_module(self.norm3_name, norm3) def forward(self, x): def _inner_forward(x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv1_plugin_names) out = self.conv2(out) if self.avg_down_stride: out = self.avd_layer(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv2_plugin_names) out = self.conv3(out) out = self.norm3(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv3_plugin_names) if self.downsample is not None: identity = self.downsample(x) out += identity return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu(out) return out @BACKBONES.register_module() class ResNeSt(ResNetV1d): """ResNeSt backbone. Args: groups (int): Number of groups of Bottleneck. Default: 1 base_width (int): Base width of Bottleneck. Default: 4 radix (int): Radix of SplitAttentionConv2d. Default: 2 reduction_factor (int): Reduction factor of inter_channels in SplitAttentionConv2d. Default: 4. avg_down_stride (bool): Whether to use average pool for stride in Bottleneck. Default: True. kwargs (dict): Keyword arguments for ResNet. """ arch_settings = { 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3)), 200: (Bottleneck, (3, 24, 36, 3)) } def __init__(self, groups=1, base_width=4, radix=2, reduction_factor=4, avg_down_stride=True, **kwargs): self.groups = groups self.base_width = base_width self.radix = radix self.reduction_factor = reduction_factor self.avg_down_stride = avg_down_stride super(ResNeSt, self).__init__(**kwargs) def make_res_layer(self, **kwargs): """Pack all blocks in a stage into a ``ResLayer``.""" return ResLayer( groups=self.groups, base_width=self.base_width, base_channels=self.base_channels, radix=self.radix, reduction_factor=self.reduction_factor, avg_down_stride=self.avg_down_stride, **kwargs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/resnet.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
import warnings import torch.nn as nn import torch.utils.checkpoint as cp from mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer from mmcv.runner import BaseModule from torch.nn.modules.batchnorm import _BatchNorm from ..builder import BACKBONES from ..utils import ResLayer class BasicBlock(BaseModule): expansion = 1 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, init_cfg=None): super(BasicBlock, self).__init__(init_cfg) assert dcn is None, 'Not implemented yet.' assert plugins is None, 'Not implemented yet.' self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) self.conv1 = build_conv_layer( conv_cfg, inplanes, planes, 3, stride=stride, padding=dilation, dilation=dilation, bias=False) self.add_module(self.norm1_name, norm1) self.conv2 = build_conv_layer( conv_cfg, planes, planes, 3, padding=1, bias=False) self.add_module(self.norm2_name, norm2) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation self.with_cp = with_cp @property def norm1(self): """nn.Module: normalization layer after the first convolution layer""" return getattr(self, self.norm1_name) @property def norm2(self): """nn.Module: normalization layer after the second convolution layer""" return getattr(self, self.norm2_name) def forward(self, x): """Forward function.""" def _inner_forward(x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) out = self.conv2(out) out = self.norm2(out) if self.downsample is not None: identity = self.downsample(x) out += identity return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu(out) return out class Bottleneck(BaseModule): expansion = 4 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, init_cfg=None): """Bottleneck block for ResNet. If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is "caffe", the stride-two layer is the first 1x1 conv layer. 
""" super(Bottleneck, self).__init__(init_cfg) assert style in ['pytorch', 'caffe'] assert dcn is None or isinstance(dcn, dict) assert plugins is None or isinstance(plugins, list) if plugins is not None: allowed_position = ['after_conv1', 'after_conv2', 'after_conv3'] assert all(p['position'] in allowed_position for p in plugins) self.inplanes = inplanes self.planes = planes self.stride = stride self.dilation = dilation self.style = style self.with_cp = with_cp self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.dcn = dcn self.with_dcn = dcn is not None self.plugins = plugins self.with_plugins = plugins is not None if self.with_plugins: # collect plugins for conv1/conv2/conv3 self.after_conv1_plugins = [ plugin['cfg'] for plugin in plugins if plugin['position'] == 'after_conv1' ] self.after_conv2_plugins = [ plugin['cfg'] for plugin in plugins if plugin['position'] == 'after_conv2' ] self.after_conv3_plugins = [ plugin['cfg'] for plugin in plugins if plugin['position'] == 'after_conv3' ] if self.style == 'pytorch': self.conv1_stride = 1 self.conv2_stride = stride else: self.conv1_stride = stride self.conv2_stride = 1 self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) self.norm3_name, norm3 = build_norm_layer( norm_cfg, planes * self.expansion, postfix=3) self.conv1 = build_conv_layer( conv_cfg, inplanes, planes, kernel_size=1, stride=self.conv1_stride, bias=False) self.add_module(self.norm1_name, norm1) fallback_on_stride = False if self.with_dcn: fallback_on_stride = dcn.pop('fallback_on_stride', False) if not self.with_dcn or fallback_on_stride: self.conv2 = build_conv_layer( conv_cfg, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=dilation, dilation=dilation, bias=False) else: assert self.conv_cfg is None, 'conv_cfg must be None for DCN' self.conv2 = build_conv_layer( dcn, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=dilation, dilation=dilation, bias=False) self.add_module(self.norm2_name, norm2) self.conv3 = build_conv_layer( conv_cfg, planes, planes * self.expansion, kernel_size=1, bias=False) self.add_module(self.norm3_name, norm3) self.relu = nn.ReLU(inplace=True) self.downsample = downsample if self.with_plugins: self.after_conv1_plugin_names = self.make_block_plugins( planes, self.after_conv1_plugins) self.after_conv2_plugin_names = self.make_block_plugins( planes, self.after_conv2_plugins) self.after_conv3_plugin_names = self.make_block_plugins( planes * self.expansion, self.after_conv3_plugins) def make_block_plugins(self, in_channels, plugins): """make plugins for block. Args: in_channels (int): Input channels of plugin. plugins (list[dict]): List of plugins cfg to build. Returns: list[str]: List of the names of plugin. 
""" assert isinstance(plugins, list) plugin_names = [] for plugin in plugins: plugin = plugin.copy() name, layer = build_plugin_layer( plugin, in_channels=in_channels, postfix=plugin.pop('postfix', '')) assert not hasattr(self, name), f'duplicate plugin {name}' self.add_module(name, layer) plugin_names.append(name) return plugin_names def forward_plugin(self, x, plugin_names): out = x for name in plugin_names: out = getattr(self, name)(out) return out @property def norm1(self): """nn.Module: normalization layer after the first convolution layer""" return getattr(self, self.norm1_name) @property def norm2(self): """nn.Module: normalization layer after the second convolution layer""" return getattr(self, self.norm2_name) @property def norm3(self): """nn.Module: normalization layer after the third convolution layer""" return getattr(self, self.norm3_name) def forward(self, x): """Forward function.""" def _inner_forward(x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv1_plugin_names) out = self.conv2(out) out = self.norm2(out) out = self.relu(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv2_plugin_names) out = self.conv3(out) out = self.norm3(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv3_plugin_names) if self.downsample is not None: identity = self.downsample(x) out += identity return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu(out) return out @BACKBONES.register_module() class ResNet(BaseModule): """ResNet backbone. Args: depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. stem_channels (int | None): Number of stem channels. If not specified, it will be the same as `base_channels`. Default: None. base_channels (int): Number of base channels of res layer. Default: 64. in_channels (int): Number of input image channels. Default: 3. num_stages (int): Resnet stages. Default: 4. strides (Sequence[int]): Strides of the first block of each stage. dilations (Sequence[int]): Dilation of each stage. out_indices (Sequence[int]): Output from which stages. style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two layer is the 3x3 conv layer, otherwise the stride-two layer is the first 1x1 conv layer. deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv avg_down (bool): Use AvgPool instead of stride conv when downsampling in the bottleneck. frozen_stages (int): Stages to be frozen (stop grad and set eval mode). -1 means not freezing any parameters. norm_cfg (dict): Dictionary to construct and config norm layer. norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. plugins (list[dict]): List of plugins for stages, each dict contains: - cfg (dict, required): Cfg dict to build plugin. - position (str, required): Position inside block to insert plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'. - stages (tuple[bool], optional): Stages to apply plugin, length should be same as 'num_stages'. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. zero_init_residual (bool): Whether to use zero init for last norm layer in resblocks to let them behave as identity. pretrained (str, optional): model pretrained path. 
Default: None init_cfg (dict or list[dict], optional): Initialization config dict. Default: None Example: >>> from mmdet.models import ResNet >>> import torch >>> self = ResNet(depth=18) >>> self.eval() >>> inputs = torch.rand(1, 3, 32, 32) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... print(tuple(level_out.shape)) (1, 64, 8, 8) (1, 128, 4, 4) (1, 256, 2, 2) (1, 512, 1, 1) """ arch_settings = { 18: (BasicBlock, (2, 2, 2, 2)), 34: (BasicBlock, (3, 4, 6, 3)), 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3)) } def __init__(self, depth, in_channels=3, stem_channels=None, base_channels=64, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', deep_stem=False, avg_down=False, frozen_stages=-1, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, dcn=None, stage_with_dcn=(False, False, False, False), plugins=None, with_cp=False, zero_init_residual=True, pretrained=None, init_cfg=None): super(ResNet, self).__init__(init_cfg) self.zero_init_residual = zero_init_residual if depth not in self.arch_settings: raise KeyError(f'invalid depth {depth} for resnet') block_init_cfg = None assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be specified at the same time' if isinstance(pretrained, str): warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) elif pretrained is None: if init_cfg is None: self.init_cfg = [ dict(type='Kaiming', layer='Conv2d'), dict( type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) ] block = self.arch_settings[depth][0] if self.zero_init_residual: if block is BasicBlock: block_init_cfg = dict( type='Constant', val=0, override=dict(name='norm2')) elif block is Bottleneck: block_init_cfg = dict( type='Constant', val=0, override=dict(name='norm3')) else: raise TypeError('pretrained must be a str or None') self.depth = depth if stem_channels is None: stem_channels = base_channels self.stem_channels = stem_channels self.base_channels = base_channels self.num_stages = num_stages assert num_stages >= 1 and num_stages <= 4 self.strides = strides self.dilations = dilations assert len(strides) == len(dilations) == num_stages self.out_indices = out_indices assert max(out_indices) < num_stages self.style = style self.deep_stem = deep_stem self.avg_down = avg_down self.frozen_stages = frozen_stages self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.with_cp = with_cp self.norm_eval = norm_eval self.dcn = dcn self.stage_with_dcn = stage_with_dcn if dcn is not None: assert len(stage_with_dcn) == num_stages self.plugins = plugins self.block, stage_blocks = self.arch_settings[depth] self.stage_blocks = stage_blocks[:num_stages] self.inplanes = stem_channels self._make_stem_layer(in_channels, stem_channels) self.res_layers = [] for i, num_blocks in enumerate(self.stage_blocks): stride = strides[i] dilation = dilations[i] dcn = self.dcn if self.stage_with_dcn[i] else None if plugins is not None: stage_plugins = self.make_stage_plugins(plugins, i) else: stage_plugins = None planes = base_channels * 2**i res_layer = self.make_res_layer( block=self.block, inplanes=self.inplanes, planes=planes, num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, dcn=dcn, plugins=stage_plugins, init_cfg=block_init_cfg) self.inplanes 
= planes * self.block.expansion layer_name = f'layer{i + 1}' self.add_module(layer_name, res_layer) self.res_layers.append(layer_name) self._freeze_stages() self.feat_dim = self.block.expansion * base_channels * 2**( len(self.stage_blocks) - 1) def make_stage_plugins(self, plugins, stage_idx): """Make plugins for ResNet ``stage_idx`` th stage. Currently we support to insert ``context_block``, ``empirical_attention_block``, ``nonlocal_block`` into the backbone like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of Bottleneck. An example of plugins format could be: Examples: >>> plugins=[ ... dict(cfg=dict(type='xxx', arg1='xxx'), ... stages=(False, True, True, True), ... position='after_conv2'), ... dict(cfg=dict(type='yyy'), ... stages=(True, True, True, True), ... position='after_conv3'), ... dict(cfg=dict(type='zzz', postfix='1'), ... stages=(True, True, True, True), ... position='after_conv3'), ... dict(cfg=dict(type='zzz', postfix='2'), ... stages=(True, True, True, True), ... position='after_conv3') ... ] >>> self = ResNet(depth=18) >>> stage_plugins = self.make_stage_plugins(plugins, 0) >>> assert len(stage_plugins) == 3 Suppose ``stage_idx=0``, the structure of blocks in the stage would be: .. code-block:: none conv1-> conv2->conv3->yyy->zzz1->zzz2 Suppose 'stage_idx=1', the structure of blocks in the stage would be: .. code-block:: none conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2 If stages is missing, the plugin would be applied to all stages. Args: plugins (list[dict]): List of plugins cfg to build. The postfix is required if multiple same type plugins are inserted. stage_idx (int): Index of stage to build Returns: list[dict]: Plugins for current stage """ stage_plugins = [] for plugin in plugins: plugin = plugin.copy() stages = plugin.pop('stages', None) assert stages is None or len(stages) == self.num_stages # whether to insert plugin into current stage if stages is None or stages[stage_idx]: stage_plugins.append(plugin) return stage_plugins def make_res_layer(self, **kwargs): """Pack all blocks in a stage into a ``ResLayer``.""" return ResLayer(**kwargs) @property def norm1(self): """nn.Module: the normalization layer named "norm1" """ return getattr(self, self.norm1_name) def _make_stem_layer(self, in_channels, stem_channels): if self.deep_stem: self.stem = nn.Sequential( build_conv_layer( self.conv_cfg, in_channels, stem_channels // 2, kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, stem_channels // 2)[1], nn.ReLU(inplace=True), build_conv_layer( self.conv_cfg, stem_channels // 2, stem_channels // 2, kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, stem_channels // 2)[1], nn.ReLU(inplace=True), build_conv_layer( self.conv_cfg, stem_channels // 2, stem_channels, kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, stem_channels)[1], nn.ReLU(inplace=True)) else: self.conv1 = build_conv_layer( self.conv_cfg, in_channels, stem_channels, kernel_size=7, stride=2, padding=3, bias=False) self.norm1_name, norm1 = build_norm_layer( self.norm_cfg, stem_channels, postfix=1) self.add_module(self.norm1_name, norm1) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) def _freeze_stages(self): if self.frozen_stages >= 0: if self.deep_stem: self.stem.eval() for param in self.stem.parameters(): param.requires_grad = False else: self.norm1.eval() for m in [self.conv1, self.norm1]: for param in m.parameters(): param.requires_grad = False for i 
in range(1, self.frozen_stages + 1): m = getattr(self, f'layer{i}') m.eval() for param in m.parameters(): param.requires_grad = False def forward(self, x): """Forward function.""" if self.deep_stem: x = self.stem(x) else: x = self.conv1(x) x = self.norm1(x) x = self.relu(x) x = self.maxpool(x) outs = [] for i, layer_name in enumerate(self.res_layers): res_layer = getattr(self, layer_name) x = res_layer(x) if i in self.out_indices: outs.append(x) return tuple(outs) def train(self, mode=True): """Convert the model into training mode while keeping the normalization layers frozen.""" super(ResNet, self).train(mode) self._freeze_stages() if mode and self.norm_eval: for m in self.modules(): # trick: eval() only has an effect on BatchNorm layers if isinstance(m, _BatchNorm): m.eval() @BACKBONES.register_module() class ResNetV1d(ResNet): r"""ResNetV1d variant described in `Bag of Tricks <https://arxiv.org/abs/1812.01187>`_. Compared with default ResNet (ResNetV1b), ResNetV1d replaces the 7x7 conv in the input stem with three 3x3 convs. And in the downsampling block, a 2x2 avg_pool with stride 2 is added before conv, whose stride is changed to 1. """ def __init__(self, **kwargs): super(ResNetV1d, self).__init__( deep_stem=True, avg_down=True, **kwargs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/resnext.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import math from mmcv.cnn import build_conv_layer, build_norm_layer from ..builder import BACKBONES from ..utils import ResLayer from .resnet import Bottleneck as _Bottleneck from .resnet import ResNet class Bottleneck(_Bottleneck): expansion = 4 def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, **kwargs): """Bottleneck block for ResNeXt. If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is "caffe", the stride-two layer is the first 1x1 conv layer.
""" super(Bottleneck, self).__init__(inplanes, planes, **kwargs) if groups == 1: width = self.planes else: width = math.floor(self.planes * (base_width / base_channels)) * groups self.norm1_name, norm1 = build_norm_layer( self.norm_cfg, width, postfix=1) self.norm2_name, norm2 = build_norm_layer( self.norm_cfg, width, postfix=2) self.norm3_name, norm3 = build_norm_layer( self.norm_cfg, self.planes * self.expansion, postfix=3) self.conv1 = build_conv_layer( self.conv_cfg, self.inplanes, width, kernel_size=1, stride=self.conv1_stride, bias=False) self.add_module(self.norm1_name, norm1) fallback_on_stride = False self.with_modulated_dcn = False if self.with_dcn: fallback_on_stride = self.dcn.pop('fallback_on_stride', False) if not self.with_dcn or fallback_on_stride: self.conv2 = build_conv_layer( self.conv_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False) else: assert self.conv_cfg is None, 'conv_cfg must be None for DCN' self.conv2 = build_conv_layer( self.dcn, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False) self.add_module(self.norm2_name, norm2) self.conv3 = build_conv_layer( self.conv_cfg, width, self.planes * self.expansion, kernel_size=1, bias=False) self.add_module(self.norm3_name, norm3) if self.with_plugins: self._del_block_plugins(self.after_conv1_plugin_names + self.after_conv2_plugin_names + self.after_conv3_plugin_names) self.after_conv1_plugin_names = self.make_block_plugins( width, self.after_conv1_plugins) self.after_conv2_plugin_names = self.make_block_plugins( width, self.after_conv2_plugins) self.after_conv3_plugin_names = self.make_block_plugins( self.planes * self.expansion, self.after_conv3_plugins) def _del_block_plugins(self, plugin_names): """delete plugins for block if exist. Args: plugin_names (list[str]): List of plugins name to delete. """ assert isinstance(plugin_names, list) for plugin_name in plugin_names: del self._modules[plugin_name] @BACKBONES.register_module() class ResNeXt(ResNet): """ResNeXt backbone. Args: depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. in_channels (int): Number of input image channels. Default: 3. num_stages (int): Resnet stages. Default: 4. groups (int): Group of resnext. base_width (int): Base width of resnext. strides (Sequence[int]): Strides of the first block of each stage. dilations (Sequence[int]): Dilation of each stage. out_indices (Sequence[int]): Output from which stages. style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two layer is the 3x3 conv layer, otherwise the stride-two layer is the first 1x1 conv layer. frozen_stages (int): Stages to be frozen (all param fixed). -1 means not freezing any parameters. norm_cfg (dict): dictionary to construct and config norm layer. norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. zero_init_residual (bool): whether to use zero init for last norm layer in resblocks to let them behave as identity. 
""" arch_settings = { 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3)) } def __init__(self, groups=1, base_width=4, **kwargs): self.groups = groups self.base_width = base_width super(ResNeXt, self).__init__(**kwargs) def make_res_layer(self, **kwargs): """Pack all blocks in a stage into a ``ResLayer``""" return ResLayer( groups=self.groups, base_width=self.base_width, base_channels=self.base_channels, **kwargs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/ssd_vgg.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings import torch.nn as nn from mmcv.cnn import VGG from mmcv.runner import BaseModule from ..builder import BACKBONES from ..necks import ssd_neck @BACKBONES.register_module() class SSDVGG(VGG, BaseModule): """VGG Backbone network for single-shot-detection. Args: depth (int): Depth of vgg, from {11, 13, 16, 19}. with_last_pool (bool): Whether to add a pooling layer at the last of the model ceil_mode (bool): When True, will use `ceil` instead of `floor` to compute the output shape. out_indices (Sequence[int]): Output from which stages. out_feature_indices (Sequence[int]): Output from which feature map. pretrained (str, optional): model pretrained path. Default: None init_cfg (dict or list[dict], optional): Initialization config dict. Default: None input_size (int, optional): Deprecated argumment. Width and height of input, from {300, 512}. l2_norm_scale (float, optional) : Deprecated argumment. L2 normalization layer init scale. Example: >>> self = SSDVGG(input_size=300, depth=11) >>> self.eval() >>> inputs = torch.rand(1, 3, 300, 300) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... 
print(tuple(level_out.shape)) (1, 1024, 19, 19) (1, 512, 10, 10) (1, 256, 5, 5) (1, 256, 3, 3) (1, 256, 1, 1) """ extra_setting = { 300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256), 512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128), } def __init__(self, depth, with_last_pool=False, ceil_mode=True, out_indices=(3, 4), out_feature_indices=(22, 34), pretrained=None, init_cfg=None, input_size=None, l2_norm_scale=None): # TODO: in_channels for mmcv.VGG super(SSDVGG, self).__init__( depth, with_last_pool=with_last_pool, ceil_mode=ceil_mode, out_indices=out_indices) self.features.add_module( str(len(self.features)), nn.MaxPool2d(kernel_size=3, stride=1, padding=1)) self.features.add_module( str(len(self.features)), nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)) self.features.add_module( str(len(self.features)), nn.ReLU(inplace=True)) self.features.add_module( str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1)) self.features.add_module( str(len(self.features)), nn.ReLU(inplace=True)) self.out_feature_indices = out_feature_indices assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be specified at the same time' if init_cfg is not None: self.init_cfg = init_cfg elif isinstance(pretrained, str): warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) elif pretrained is None: self.init_cfg = [ dict(type='Kaiming', layer='Conv2d'), dict(type='Constant', val=1, layer='BatchNorm2d'), dict(type='Normal', std=0.01, layer='Linear'), ] else: raise TypeError('pretrained must be a str or None') if input_size is not None: warnings.warn('DeprecationWarning: input_size is deprecated') if l2_norm_scale is not None: warnings.warn('DeprecationWarning: l2_norm_scale in VGG is ' 'deprecated, it has been moved to SSDNeck.') def init_weights(self, pretrained=None): super(VGG, self).init_weights() def forward(self, x): """Forward function.""" outs = [] for i, layer in enumerate(self.features): x = layer(x) if i in self.out_feature_indices: outs.append(x) if len(outs) == 1: return outs[0] else: return tuple(outs) class L2Norm(ssd_neck.L2Norm): def __init__(self, **kwargs): super(L2Norm, self).__init__(**kwargs) warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py ' 'is deprecated, please use L2Norm in ' 'mmdet/models/necks/ssd_neck.py instead') ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/swin.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings from collections import OrderedDict from copy import deepcopy import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as cp from mmcv.cnn import build_norm_layer, constant_init, trunc_normal_init from mmcv.cnn.bricks.transformer import FFN, build_dropout from mmcv.cnn.utils.weight_init import trunc_normal_ from mmcv.runner import BaseModule, ModuleList, _load_checkpoint from mmcv.utils import to_2tuple from ...utils import get_root_logger from ..builder import BACKBONES from ..utils.ckpt_convert import swin_converter from ..utils.transformer import PatchEmbed, PatchMerging class WindowMSA(BaseModule): """Window based multi-head self-attention (W-MSA) module with relative position bias. Args: embed_dims (int): Number of input channels. num_heads (int): Number of attention heads. window_size (tuple[int]): The height and width of the window. 
qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. Default: True. qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. Default: None. attn_drop_rate (float, optional): Dropout ratio of attention weight. Default: 0.0 proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. init_cfg (dict | None, optional): The Config for initialization. Default: None. """ def __init__(self, embed_dims, num_heads, window_size, qkv_bias=True, qk_scale=None, attn_drop_rate=0., proj_drop_rate=0., init_cfg=None): super().__init__() self.embed_dims = embed_dims self.window_size = window_size # Wh, Ww self.num_heads = num_heads head_embed_dims = embed_dims // num_heads self.scale = qk_scale or head_embed_dims**-0.5 self.init_cfg = init_cfg # define a parameter table of relative position bias self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH # About 2x faster than original impl Wh, Ww = self.window_size rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww) rel_position_index = rel_index_coords + rel_index_coords.T rel_position_index = rel_position_index.flip(1).contiguous() self.register_buffer('relative_position_index', rel_position_index) self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop_rate) self.proj = nn.Linear(embed_dims, embed_dims) self.proj_drop = nn.Dropout(proj_drop_rate) self.softmax = nn.Softmax(dim=-1) def init_weights(self): trunc_normal_(self.relative_position_bias_table, std=0.02) def forward(self, x, mask=None): """ Args: x (tensor): input features with shape of (num_windows*B, N, C) mask (tensor | None, Optional): mask with shape of (num_windows, Wh*Ww, Wh*Ww), value should be between (-inf, 0]. """ B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) # make torchscript happy (cannot use tensor as tuple) q, k, v = qkv[0], qkv[1], qkv[2] q = q * self.scale attn = (q @ k.transpose(-2, -1)) relative_position_bias = self.relative_position_bias_table[ self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute( 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww attn = attn + relative_position_bias.unsqueeze(0) if mask is not None: nW = mask.shape[0] attn = attn.view(B // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x @staticmethod def double_step_seq(step1, len1, step2, len2): seq1 = torch.arange(0, step1 * len1, step1) seq2 = torch.arange(0, step2 * len2, step2) return (seq1[:, None] + seq2[None, :]).reshape(1, -1) class ShiftWindowMSA(BaseModule): """Shifted Window Multihead Self-Attention Module. Args: embed_dims (int): Number of input channels. num_heads (int): Number of attention heads. window_size (int): The height and width of the window. shift_size (int, optional): The shift step of each window towards right-bottom. If zero, act as regular window-msa. Defaults to 0. qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. Defaults: None. 
attn_drop_rate (float, optional): Dropout ratio of attention weight. Defaults: 0. proj_drop_rate (float, optional): Dropout ratio of output. Defaults: 0. dropout_layer (dict, optional): The dropout_layer used before output. Defaults: dict(type='DropPath', drop_prob=0.). init_cfg (dict, optional): The extra config for initialization. Default: None. """ def __init__(self, embed_dims, num_heads, window_size, shift_size=0, qkv_bias=True, qk_scale=None, attn_drop_rate=0, proj_drop_rate=0, dropout_layer=dict(type='DropPath', drop_prob=0.), init_cfg=None): super().__init__(init_cfg) self.window_size = window_size self.shift_size = shift_size assert 0 <= self.shift_size < self.window_size self.w_msa = WindowMSA( embed_dims=embed_dims, num_heads=num_heads, window_size=to_2tuple(window_size), qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop_rate=attn_drop_rate, proj_drop_rate=proj_drop_rate, init_cfg=None) self.drop = build_dropout(dropout_layer) def forward(self, query, hw_shape): B, L, C = query.shape H, W = hw_shape assert L == H * W, 'input feature has wrong size' query = query.view(B, H, W, C) # pad feature maps to multiples of window size pad_r = (self.window_size - W % self.window_size) % self.window_size pad_b = (self.window_size - H % self.window_size) % self.window_size query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b)) H_pad, W_pad = query.shape[1], query.shape[2] # cyclic shift if self.shift_size > 0: shifted_query = torch.roll( query, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) # calculate attention mask for SW-MSA img_mask = torch.zeros((1, H_pad, W_pad, 1), device=query.device) h_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) w_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) cnt = 0 for h in h_slices: for w in w_slices: img_mask[:, h, w, :] = cnt cnt += 1 # nW, window_size, window_size, 1 mask_windows = self.window_partition(img_mask) mask_windows = mask_windows.view( -1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill( attn_mask == 0, float(0.0)) else: shifted_query = query attn_mask = None # nW*B, window_size, window_size, C query_windows = self.window_partition(shifted_query) # nW*B, window_size*window_size, C query_windows = query_windows.view(-1, self.window_size**2, C) # W-MSA/SW-MSA (nW*B, window_size*window_size, C) attn_windows = self.w_msa(query_windows, mask=attn_mask) # merge windows attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) # B H' W' C shifted_x = self.window_reverse(attn_windows, H_pad, W_pad) # reverse cyclic shift if self.shift_size > 0: x = torch.roll( shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: x = shifted_x if pad_r > 0 or pad_b: x = x[:, :H, :W, :].contiguous() x = x.view(B, H * W, C) x = self.drop(x) return x def window_reverse(self, windows, H, W): """ Args: windows: (num_windows*B, window_size, window_size, C) H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C) """ window_size = self.window_size B = int(windows.shape[0] / (H * W / window_size / window_size)) x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) return x def window_partition(self, x): """ Args: x: (B, H, W, C) Returns: windows: (num_windows*B, 
window_size, window_size, C) """ B, H, W, C = x.shape window_size = self.window_size x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous() windows = windows.view(-1, window_size, window_size, C) return windows class SwinBlock(BaseModule): """Swin Transformer block. Args: embed_dims (int): The feature dimension. num_heads (int): Parallel attention heads. feedforward_channels (int): The hidden dimension for FFNs. window_size (int, optional): The local window scale. Default: 7. shift (bool, optional): Whether to shift the window or not. Default: False. qkv_bias (bool, optional): enable bias for qkv if True. Default: True. qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. Default: None. drop_rate (float, optional): Dropout rate. Default: 0. attn_drop_rate (float, optional): Attention dropout rate. Default: 0. drop_path_rate (float, optional): Stochastic depth rate. Default: 0. act_cfg (dict, optional): The config dict of activation function. Default: dict(type='GELU'). norm_cfg (dict, optional): The config dict of normalization. Default: dict(type='LN'). with_cp (bool, optional): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. init_cfg (dict | list | None, optional): The init config. Default: None. """ def __init__(self, embed_dims, num_heads, feedforward_channels, window_size=7, shift=False, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN'), with_cp=False, init_cfg=None): super(SwinBlock, self).__init__() self.init_cfg = init_cfg self.with_cp = with_cp self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] self.attn = ShiftWindowMSA( embed_dims=embed_dims, num_heads=num_heads, window_size=window_size, shift_size=window_size // 2 if shift else 0, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop_rate=attn_drop_rate, proj_drop_rate=drop_rate, dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), init_cfg=None) self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] self.ffn = FFN( embed_dims=embed_dims, feedforward_channels=feedforward_channels, num_fcs=2, ffn_drop=drop_rate, dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), act_cfg=act_cfg, add_identity=True, init_cfg=None) def forward(self, x, hw_shape): def _inner_forward(x): identity = x x = self.norm1(x) x = self.attn(x, hw_shape) x = x + identity identity = x x = self.norm2(x) x = self.ffn(x, identity=identity) return x if self.with_cp and x.requires_grad: x = cp.checkpoint(_inner_forward, x) else: x = _inner_forward(x) return x class SwinBlockSequence(BaseModule): """Implements one stage in Swin Transformer. Args: embed_dims (int): The feature dimension. num_heads (int): Parallel attention heads. feedforward_channels (int): The hidden dimension for FFNs. depth (int): The number of blocks in this stage. window_size (int, optional): The local window scale. Default: 7. qkv_bias (bool, optional): enable bias for qkv if True. Default: True. qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. Default: None. drop_rate (float, optional): Dropout rate. Default: 0. attn_drop_rate (float, optional): Attention dropout rate. Default: 0. drop_path_rate (float | list[float], optional): Stochastic depth rate. Default: 0. downsample (BaseModule | None, optional): The downsample operation module. Default: None.
act_cfg (dict, optional): The config dict of activation function. Default: dict(type='GELU'). norm_cfg (dict, optional): The config dict of normalization. Default: dict(type='LN'). with_cp (bool, optional): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. init_cfg (dict | list | None, optional): The init config. Default: None. """ def __init__(self, embed_dims, num_heads, feedforward_channels, depth, window_size=7, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., downsample=None, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN'), with_cp=False, init_cfg=None): super().__init__(init_cfg=init_cfg) if isinstance(drop_path_rate, list): drop_path_rates = drop_path_rate assert len(drop_path_rates) == depth else: drop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)] self.blocks = ModuleList() for i in range(depth): block = SwinBlock( embed_dims=embed_dims, num_heads=num_heads, feedforward_channels=feedforward_channels, window_size=window_size, shift=False if i % 2 == 0 else True, qkv_bias=qkv_bias, qk_scale=qk_scale, drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=drop_path_rates[i], act_cfg=act_cfg, norm_cfg=norm_cfg, with_cp=with_cp, init_cfg=None) self.blocks.append(block) self.downsample = downsample def forward(self, x, hw_shape): for block in self.blocks: x = block(x, hw_shape) if self.downsample: x_down, down_hw_shape = self.downsample(x, hw_shape) return x_down, down_hw_shape, x, hw_shape else: return x, hw_shape, x, hw_shape @BACKBONES.register_module() class SwinTransformer(BaseModule): """ Swin Transformer A PyTorch implement of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - https://arxiv.org/abs/2103.14030 Inspiration from https://github.com/microsoft/Swin-Transformer Args: pretrain_img_size (int | tuple[int]): The size of input image when pretrain. Defaults: 224. in_channels (int): The num of input channels. Defaults: 3. embed_dims (int): The feature dimension. Default: 96. patch_size (int | tuple[int]): Patch size. Default: 4. window_size (int): Window size. Default: 7. mlp_ratio (int): Ratio of mlp hidden dim to embedding dim. Default: 4. depths (tuple[int]): Depths of each Swin Transformer stage. Default: (2, 2, 6, 2). num_heads (tuple[int]): Parallel attention heads of each Swin Transformer stage. Default: (3, 6, 12, 24). strides (tuple[int]): The patch merging or patch embedding stride of each Swin Transformer stage. (In swin, we set kernel size equal to stride.) Default: (4, 2, 2, 2). out_indices (tuple[int]): Output from which stages. Default: (0, 1, 2, 3). qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. Default: None. patch_norm (bool): If add a norm layer for patch embed and patch merging. Default: True. drop_rate (float): Dropout rate. Defaults: 0. attn_drop_rate (float): Attention dropout rate. Default: 0. drop_path_rate (float): Stochastic depth rate. Defaults: 0.1. use_abs_pos_embed (bool): If True, add absolute position embedding to the patch embedding. Defaults: False. act_cfg (dict): Config dict for activation layer. Default: dict(type='GELU'). norm_cfg (dict): Config dict for normalization layer at output of backone. Defaults: dict(type='LN'). with_cp (bool, optional): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. 
Default: False. pretrained (str, optional): model pretrained path. Default: None. convert_weights (bool): The flag indicates whether the pre-trained model is from the original repo. We may need to convert some keys to make it compatible. Default: False. frozen_stages (int): Stages to be frozen (stop grad and set eval mode). Default: -1 (-1 means not freezing any parameters). init_cfg (dict, optional): The Config for initialization. Defaults to None. """ def __init__(self, pretrain_img_size=224, in_channels=3, embed_dims=96, patch_size=4, window_size=7, mlp_ratio=4, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), strides=(4, 2, 2, 2), out_indices=(0, 1, 2, 3), qkv_bias=True, qk_scale=None, patch_norm=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, use_abs_pos_embed=False, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN'), with_cp=False, pretrained=None, convert_weights=False, frozen_stages=-1, init_cfg=None): self.convert_weights = convert_weights self.frozen_stages = frozen_stages if isinstance(pretrain_img_size, int): pretrain_img_size = to_2tuple(pretrain_img_size) elif isinstance(pretrain_img_size, tuple): if len(pretrain_img_size) == 1: pretrain_img_size = to_2tuple(pretrain_img_size[0]) assert len(pretrain_img_size) == 2, \ f'The size of image should have length 1 or 2, ' \ f'but got {len(pretrain_img_size)}' assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be specified at the same time' if isinstance(pretrained, str): warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) elif pretrained is None: self.init_cfg = init_cfg else: raise TypeError('pretrained must be a str or None') super(SwinTransformer, self).__init__(init_cfg=init_cfg) num_layers = len(depths) self.out_indices = out_indices self.use_abs_pos_embed = use_abs_pos_embed assert strides[0] == patch_size, 'Use non-overlapping patch embed.' 
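# Editor's note (illustrative comment, not from the upstream file): the assert
# above pins strides[0] to patch_size so the stem convolution inside PatchEmbed
# below uses kernel_size == stride, cutting the image into non-overlapping
# patches. Assumed example shapes: with patch_size=4 and embed_dims=96, a
# (B, 3, 224, 224) image becomes a (B, 3136, 96) token sequence with
# hw_shape == (56, 56), since 224 // 4 == 56 and 56 * 56 == 3136.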
self.patch_embed = PatchEmbed( in_channels=in_channels, embed_dims=embed_dims, conv_type='Conv2d', kernel_size=patch_size, stride=strides[0], norm_cfg=norm_cfg if patch_norm else None, init_cfg=None) if self.use_abs_pos_embed: patch_row = pretrain_img_size[0] // patch_size patch_col = pretrain_img_size[1] // patch_size self.absolute_pos_embed = nn.Parameter( torch.zeros((1, embed_dims, patch_row, patch_col))) self.drop_after_pos = nn.Dropout(p=drop_rate) # set stochastic depth decay rule total_depth = sum(depths) dpr = [ x.item() for x in torch.linspace(0, drop_path_rate, total_depth) ] self.stages = ModuleList() in_channels = embed_dims for i in range(num_layers): if i < num_layers - 1: downsample = PatchMerging( in_channels=in_channels, out_channels=2 * in_channels, stride=strides[i + 1], norm_cfg=norm_cfg if patch_norm else None, init_cfg=None) else: downsample = None stage = SwinBlockSequence( embed_dims=in_channels, num_heads=num_heads[i], feedforward_channels=mlp_ratio * in_channels, depth=depths[i], window_size=window_size, qkv_bias=qkv_bias, qk_scale=qk_scale, drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=dpr[sum(depths[:i]):sum(depths[:i + 1])], downsample=downsample, act_cfg=act_cfg, norm_cfg=norm_cfg, with_cp=with_cp, init_cfg=None) self.stages.append(stage) if downsample: in_channels = downsample.out_channels self.num_features = [int(embed_dims * 2**i) for i in range(num_layers)] # Add a norm layer for each output for i in out_indices: layer = build_norm_layer(norm_cfg, self.num_features[i])[1] layer_name = f'norm{i}' self.add_module(layer_name, layer) def train(self, mode=True): """Convert the model into training mode while keep layers freezed.""" super(SwinTransformer, self).train(mode) self._freeze_stages() def _freeze_stages(self): if self.frozen_stages >= 0: self.patch_embed.eval() for param in self.patch_embed.parameters(): param.requires_grad = False if self.use_abs_pos_embed: self.absolute_pos_embed.requires_grad = False self.drop_after_pos.eval() for i in range(1, self.frozen_stages + 1): if (i - 1) in self.out_indices: norm_layer = getattr(self, f'norm{i-1}') norm_layer.eval() for param in norm_layer.parameters(): param.requires_grad = False m = self.stages[i - 1] m.eval() for param in m.parameters(): param.requires_grad = False def init_weights(self): logger = get_root_logger() if self.init_cfg is None: logger.warn(f'No pre-trained weights for ' f'{self.__class__.__name__}, ' f'training start from scratch') if self.use_abs_pos_embed: trunc_normal_(self.absolute_pos_embed, std=0.02) for m in self.modules(): if isinstance(m, nn.Linear): trunc_normal_init(m, std=.02, bias=0.) 
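# Editor's note (illustrative comment, not from the upstream file): for
# from-scratch training (init_cfg is None) the weights follow the usual
# Swin/ViT recipe: truncated-normal weights (std=0.02) for nn.Linear above,
# and constant weight=1 / bias=0 for nn.LayerNorm in the elif branch below.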
elif isinstance(m, nn.LayerNorm): constant_init(m, 1.0) else: assert 'checkpoint' in self.init_cfg, f'Only support ' \ f'specify `Pretrained` in ' \ f'`init_cfg` in ' \ f'{self.__class__.__name__} ' ckpt = _load_checkpoint( self.init_cfg.checkpoint, logger=logger, map_location='cpu') if 'state_dict' in ckpt: _state_dict = ckpt['state_dict'] elif 'model' in ckpt: _state_dict = ckpt['model'] else: _state_dict = ckpt if self.convert_weights: # supported loading weight from original repo, _state_dict = swin_converter(_state_dict) state_dict = OrderedDict() for k, v in _state_dict.items(): if k.startswith('backbone.'): state_dict[k[9:]] = v # strip prefix of state_dict if list(state_dict.keys())[0].startswith('module.'): state_dict = {k[7:]: v for k, v in state_dict.items()} # reshape absolute position embedding if state_dict.get('absolute_pos_embed') is not None: absolute_pos_embed = state_dict['absolute_pos_embed'] N1, L, C1 = absolute_pos_embed.size() N2, C2, H, W = self.absolute_pos_embed.size() if N1 != N2 or C1 != C2 or L != H * W: logger.warning('Error in loading absolute_pos_embed, pass') else: state_dict['absolute_pos_embed'] = absolute_pos_embed.view( N2, H, W, C2).permute(0, 3, 1, 2).contiguous() # interpolate position bias table if needed relative_position_bias_table_keys = [ k for k in state_dict.keys() if 'relative_position_bias_table' in k ] for table_key in relative_position_bias_table_keys: table_pretrained = state_dict[table_key] table_current = self.state_dict()[table_key] L1, nH1 = table_pretrained.size() L2, nH2 = table_current.size() if nH1 != nH2: logger.warning(f'Error in loading {table_key}, pass') elif L1 != L2: S1 = int(L1**0.5) S2 = int(L2**0.5) table_pretrained_resized = F.interpolate( table_pretrained.permute(1, 0).reshape(1, nH1, S1, S1), size=(S2, S2), mode='bicubic') state_dict[table_key] = table_pretrained_resized.view( nH2, L2).permute(1, 0).contiguous() # load state_dict self.load_state_dict(state_dict, False) def forward(self, x): x, hw_shape = self.patch_embed(x) if self.use_abs_pos_embed: h, w = self.absolute_pos_embed.shape[1:3] if hw_shape[0] != h or hw_shape[1] != w: absolute_pos_embed = F.interpolate( self.absolute_pos_embed, size=hw_shape, mode='bicubic', align_corners=False).flatten(2).transpose(1, 2) else: absolute_pos_embed = self.absolute_pos_embed.flatten( 2).transpose(1, 2) x = x + absolute_pos_embed x = self.drop_after_pos(x) outs = [] for i, stage in enumerate(self.stages): x, hw_shape, out, out_hw_shape = stage(x, hw_shape) if i in self.out_indices: norm_layer = getattr(self, f'norm{i}') out = norm_layer(out) out = out.view(-1, *out_hw_shape, self.num_features[i]).permute(0, 3, 1, 2).contiguous() outs.append(out) return outs ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/trident_resnet.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as cp from mmcv.cnn import build_conv_layer, build_norm_layer from mmcv.runner import BaseModule from torch.nn.modules.utils import _pair from mmdet.models.backbones.resnet import Bottleneck, ResNet from mmdet.models.builder import BACKBONES class TridentConv(BaseModule): """Trident Convolution Module. Args: in_channels (int): Number of channels in input. out_channels (int): Number of channels in output. kernel_size (int): Size of convolution kernel. stride (int, optional): Convolution stride. Default: 1. 
trident_dilations (tuple[int, int, int], optional): Dilations of different trident branch. Default: (1, 2, 3). test_branch_idx (int, optional): In inference, all 3 branches will be used if `test_branch_idx==-1`, otherwise only branch with index `test_branch_idx` will be used. Default: 1. bias (bool, optional): Whether to use bias in convolution or not. Default: False. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, trident_dilations=(1, 2, 3), test_branch_idx=1, bias=False, init_cfg=None): super(TridentConv, self).__init__(init_cfg) self.num_branch = len(trident_dilations) self.with_bias = bias self.test_branch_idx = test_branch_idx self.stride = _pair(stride) self.kernel_size = _pair(kernel_size) self.paddings = _pair(trident_dilations) self.dilations = trident_dilations self.in_channels = in_channels self.out_channels = out_channels self.bias = bias self.weight = nn.Parameter( torch.Tensor(out_channels, in_channels, *self.kernel_size)) if bias: self.bias = nn.Parameter(torch.Tensor(out_channels)) else: self.bias = None def extra_repr(self): tmpstr = f'in_channels={self.in_channels}' tmpstr += f', out_channels={self.out_channels}' tmpstr += f', kernel_size={self.kernel_size}' tmpstr += f', num_branch={self.num_branch}' tmpstr += f', test_branch_idx={self.test_branch_idx}' tmpstr += f', stride={self.stride}' tmpstr += f', paddings={self.paddings}' tmpstr += f', dilations={self.dilations}' tmpstr += f', bias={self.bias}' return tmpstr def forward(self, inputs): if self.training or self.test_branch_idx == -1: outputs = [ F.conv2d(input, self.weight, self.bias, self.stride, padding, dilation) for input, dilation, padding in zip( inputs, self.dilations, self.paddings) ] else: assert len(inputs) == 1 outputs = [ F.conv2d(inputs[0], self.weight, self.bias, self.stride, self.paddings[self.test_branch_idx], self.dilations[self.test_branch_idx]) ] return outputs # Since TridentNet is defined over ResNet50 and ResNet101, here we # only support TridentBottleneckBlock. class TridentBottleneck(Bottleneck): """BottleBlock for TridentResNet. Args: trident_dilations (tuple[int, int, int]): Dilations of different trident branch. test_branch_idx (int): In inference, all 3 branches will be used if `test_branch_idx==-1`, otherwise only branch with index `test_branch_idx` will be used. concat_output (bool): Whether to concat the output list to a Tensor. `True` only in the last Block. 
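
# A minimal sketch of the branch computation TridentConv's `forward`
# performs: one shared 3x3 weight applied with dilations (1, 2, 3).
# With padding equal to the dilation, a stride-1 3x3 conv preserves the
# spatial size, so the three branch outputs stay aligned. Values are
# illustrative, not taken from this repository.
import torch
import torch.nn.functional as F

weight = torch.randn(8, 4, 3, 3)     # shared across all branches
x = torch.randn(2, 4, 32, 32)
branches = [x, x, x]                 # same input for each branch here

outs = [
    F.conv2d(inp, weight, None, 1, padding, dilation)
    for inp, dilation, padding in zip(branches, (1, 2, 3), (1, 2, 3))
]
assert all(o.shape == (2, 8, 32, 32) for o in outs)
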
""" def __init__(self, trident_dilations, test_branch_idx, concat_output, **kwargs): super(TridentBottleneck, self).__init__(**kwargs) self.trident_dilations = trident_dilations self.num_branch = len(trident_dilations) self.concat_output = concat_output self.test_branch_idx = test_branch_idx self.conv2 = TridentConv( self.planes, self.planes, kernel_size=3, stride=self.conv2_stride, bias=False, trident_dilations=self.trident_dilations, test_branch_idx=test_branch_idx, init_cfg=dict( type='Kaiming', distribution='uniform', mode='fan_in', override=dict(name='conv2'))) def forward(self, x): def _inner_forward(x): num_branch = ( self.num_branch if self.training or self.test_branch_idx == -1 else 1) identity = x if not isinstance(x, list): x = (x, ) * num_branch identity = x if self.downsample is not None: identity = [self.downsample(b) for b in x] out = [self.conv1(b) for b in x] out = [self.norm1(b) for b in out] out = [self.relu(b) for b in out] if self.with_plugins: for k in range(len(out)): out[k] = self.forward_plugin(out[k], self.after_conv1_plugin_names) out = self.conv2(out) out = [self.norm2(b) for b in out] out = [self.relu(b) for b in out] if self.with_plugins: for k in range(len(out)): out[k] = self.forward_plugin(out[k], self.after_conv2_plugin_names) out = [self.conv3(b) for b in out] out = [self.norm3(b) for b in out] if self.with_plugins: for k in range(len(out)): out[k] = self.forward_plugin(out[k], self.after_conv3_plugin_names) out = [ out_b + identity_b for out_b, identity_b in zip(out, identity) ] return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = [self.relu(b) for b in out] if self.concat_output: out = torch.cat(out, dim=0) return out def make_trident_res_layer(block, inplanes, planes, num_blocks, stride=1, trident_dilations=(1, 2, 3), style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, test_branch_idx=-1): """Build Trident Res Layers.""" downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = [] conv_stride = stride downsample.extend([ build_conv_layer( conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=conv_stride, bias=False), build_norm_layer(norm_cfg, planes * block.expansion)[1] ]) downsample = nn.Sequential(*downsample) layers = [] for i in range(num_blocks): layers.append( block( inplanes=inplanes, planes=planes, stride=stride if i == 0 else 1, trident_dilations=trident_dilations, downsample=downsample if i == 0 else None, style=style, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, dcn=dcn, plugins=plugins, test_branch_idx=test_branch_idx, concat_output=True if i == num_blocks - 1 else False)) inplanes = planes * block.expansion return nn.Sequential(*layers) @BACKBONES.register_module() class TridentResNet(ResNet): """The stem layer, stage 1 and stage 2 in Trident ResNet are identical to ResNet, while in stage 3, Trident BottleBlock is utilized to replace the normal BottleBlock to yield trident output. Different branch shares the convolution weight but uses different dilations to achieve multi-scale output. / stage3(b0) \ x - stem - stage1 - stage2 - stage3(b1) - output \ stage3(b2) / Args: depth (int): Depth of resnet, from {50, 101, 152}. num_branch (int): Number of branches in TridentNet. test_branch_idx (int): In inference, all 3 branches will be used if `test_branch_idx==-1`, otherwise only branch with index `test_branch_idx` will be used. 
trident_dilations (tuple[int]): Dilations of different trident branch. len(trident_dilations) should be equal to num_branch. """ # noqa def __init__(self, depth, num_branch, test_branch_idx, trident_dilations, **kwargs): assert num_branch == len(trident_dilations) assert depth in (50, 101, 152) super(TridentResNet, self).__init__(depth, **kwargs) assert self.num_stages == 3 self.test_branch_idx = test_branch_idx self.num_branch = num_branch last_stage_idx = self.num_stages - 1 stride = self.strides[last_stage_idx] dilation = trident_dilations dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None if self.plugins is not None: stage_plugins = self.make_stage_plugins(self.plugins, last_stage_idx) else: stage_plugins = None planes = self.base_channels * 2**last_stage_idx res_layer = make_trident_res_layer( TridentBottleneck, inplanes=(self.block.expansion * self.base_channels * 2**(last_stage_idx - 1)), planes=planes, num_blocks=self.stage_blocks[last_stage_idx], stride=stride, trident_dilations=dilation, style=self.style, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, plugins=stage_plugins, test_branch_idx=self.test_branch_idx) layer_name = f'layer{last_stage_idx + 1}' self.__setattr__(layer_name, res_layer) self.res_layers.pop(last_stage_idx) self.res_layers.insert(last_stage_idx, layer_name) self._freeze_stages() ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/builder.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings from mmcv.cnn import MODELS as MMCV_MODELS from mmcv.utils import Registry MODELS = Registry('models', parent=MMCV_MODELS) BACKBONES = MODELS NECKS = MODELS ROI_EXTRACTORS = MODELS SHARED_HEADS = MODELS HEADS = MODELS LOSSES = MODELS DETECTORS = MODELS def build_backbone(cfg): """Build backbone.""" return BACKBONES.build(cfg) def build_neck(cfg): """Build neck.""" return NECKS.build(cfg) def build_roi_extractor(cfg): """Build roi extractor.""" return ROI_EXTRACTORS.build(cfg) def build_shared_head(cfg): """Build shared head.""" return SHARED_HEADS.build(cfg) def build_head(cfg): """Build head.""" return HEADS.build(cfg) def build_loss(cfg): """Build loss.""" return LOSSES.build(cfg) def build_detector(cfg, train_cfg=None, test_cfg=None): """Build detector.""" if train_cfg is not None or test_cfg is not None: warnings.warn( 'train_cfg and test_cfg is deprecated, ' 'please specify them in model', UserWarning) assert cfg.get('train_cfg') is None or train_cfg is None, \ 'train_cfg specified in both outer field and model field ' assert cfg.get('test_cfg') is None or test_cfg is None, \ 'test_cfg specified in both outer field and model field ' return DETECTORS.build( cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
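
# A hedged sketch of how the builders defined in builder.py above are
# typically used: every component type aliases the single MODELS
# registry, so a plain config dict is enough to assemble a detector.
# The config values below are illustrative, not taken from this
# repository, and assume mmdet/mmcv are installed.
from mmdet.models import build_detector

cfg = dict(
    type='RetinaNet',
    backbone=dict(
        type='ResNet', depth=50, num_stages=4,
        out_indices=(0, 1, 2, 3)),
    neck=dict(
        type='FPN', in_channels=[256, 512, 1024, 2048],
        out_channels=256, start_level=1,
        add_extra_convs='on_input', num_outs=5),
    bbox_head=dict(type='RetinaHead', num_classes=80, in_channels=256),
    test_cfg=dict(
        nms_pre=1000, min_bbox_size=0, score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
model = build_detector(cfg)  # resolved through the shared MODELS registry
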
from .anchor_free_head import AnchorFreeHead from .anchor_head import AnchorHead from .atss_head import ATSSHead from .autoassign_head import AutoAssignHead from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead from .centernet_head import CenterNetHead from .centripetal_head import CentripetalHead from .corner_head import CornerHead from .ddod_head import DDODHead from .deformable_detr_head import DeformableDETRHead from .detr_head import DETRHead from .embedding_rpn_head import EmbeddingRPNHead from .fcos_head import FCOSHead from .fovea_head import FoveaHead from .free_anchor_retina_head import FreeAnchorRetinaHead from .fsaf_head import FSAFHead from .ga_retina_head import GARetinaHead from .ga_rpn_head import GARPNHead from .gfl_head import GFLHead from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead from .lad_head import LADHead from .ld_head import LDHead from .mask2former_head import Mask2FormerHead from .maskformer_head import MaskFormerHead from .nasfcos_head import NASFCOSHead from .paa_head import PAAHead from .pisa_retinanet_head import PISARetinaHead from .pisa_ssd_head import PISASSDHead from .reppoints_head import RepPointsHead from .retina_head import RetinaHead from .retina_sepbn_head import RetinaSepBNHead from .rpn_head import RPNHead from .sabl_retina_head import SABLRetinaHead from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead from .solov2_head import SOLOV2Head from .ssd_head import SSDHead from .tood_head import TOODHead from .vfnet_head import VFNetHead from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead from .yolo_head import YOLOV3Head from .yolof_head import YOLOFHead from .yolox_head import YOLOXHead __all__ = [ 'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption', 'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead', 'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead', 'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead', 'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead', 'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead', 'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead', 'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'SOLOV2Head', 'DDODHead' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/anchor_free_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings from abc import abstractmethod import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import force_fp32 from mmdet.core import build_bbox_coder, multi_apply from mmdet.core.anchor.point_generator import MlvlPointGenerator from ..builder import HEADS, build_loss from .base_dense_head import BaseDenseHead from .dense_test_mixins import BBoxTestMixin @HEADS.register_module() class AnchorFreeHead(BaseDenseHead, BBoxTestMixin): """Anchor-free head (FCOS, Fovea, RepPoints, etc.). Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. Used in child classes. stacked_convs (int): Number of stacking convs of the head. 
strides (tuple): Downsample factor of each feature map. dcn_on_last_conv (bool): If true, use dcn in the last layer of towers. Default: False. conv_bias (bool | str): If specified as `auto`, it will be decided by the norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise False. Default: "auto". loss_cls (dict): Config of classification loss. loss_bbox (dict): Config of localization loss. bbox_coder (dict): Config of bbox coder. Defaults 'DistancePointBBoxCoder'. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Config dict for normalization layer. Default: None. train_cfg (dict): Training config of anchor head. test_cfg (dict): Testing config of anchor head. init_cfg (dict or list[dict], optional): Initialization config dict. """ # noqa: W605 _version = 1 def __init__(self, num_classes, in_channels, feat_channels=256, stacked_convs=4, strides=(4, 8, 16, 32, 64), dcn_on_last_conv=False, conv_bias='auto', loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='IoULoss', loss_weight=1.0), bbox_coder=dict(type='DistancePointBBoxCoder'), conv_cfg=None, norm_cfg=None, train_cfg=None, test_cfg=None, init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='conv_cls', std=0.01, bias_prob=0.01))): super(AnchorFreeHead, self).__init__(init_cfg) self.num_classes = num_classes self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) if self.use_sigmoid_cls: self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 self.in_channels = in_channels self.feat_channels = feat_channels self.stacked_convs = stacked_convs self.strides = strides self.dcn_on_last_conv = dcn_on_last_conv assert conv_bias == 'auto' or isinstance(conv_bias, bool) self.conv_bias = conv_bias self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) self.bbox_coder = build_bbox_coder(bbox_coder) self.prior_generator = MlvlPointGenerator(strides) # In order to keep a more general interface and be consistent with # anchor_head. 
# We can think of a point as one anchor. self.num_base_priors = self.prior_generator.num_base_priors[0] self.train_cfg = train_cfg self.test_cfg = test_cfg self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.fp16_enabled = False self._init_layers() def _init_layers(self): """Initialize layers of the head.""" self._init_cls_convs() self._init_reg_convs() self._init_predictor() def _init_cls_convs(self): """Initialize classification conv layers of the head.""" self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels if self.dcn_on_last_conv and i == self.stacked_convs - 1: conv_cfg = dict(type='DCNv2') else: conv_cfg = self.conv_cfg self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, bias=self.conv_bias)) def _init_reg_convs(self): """Initialize bbox regression conv layers of the head.""" self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels if self.dcn_on_last_conv and i == self.stacked_convs - 1: conv_cfg = dict(type='DCNv2') else: conv_cfg = self.conv_cfg self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, bias=self.conv_bias)) def _init_predictor(self): """Initialize predictor layers of the head.""" self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): """Hack some keys of the model state dict so that it can load checkpoints of previous versions.""" version = local_metadata.get('version', None) if version is None: # the key is different in early versions # for example, 'fcos_cls' became 'conv_cls' bbox_head_keys = [ k for k in state_dict.keys() if k.startswith(prefix) ] ori_predictor_keys = [] new_predictor_keys = [] # e.g. 'fcos_cls' or 'fcos_reg' for key in bbox_head_keys: ori_predictor_keys.append(key) key = key.split('.') conv_name = None if key[1].endswith('cls'): conv_name = 'conv_cls' elif key[1].endswith('reg'): conv_name = 'conv_reg' elif key[1].endswith('centerness'): conv_name = 'conv_centerness' else: assert NotImplementedError # no-op: non-predictor keys fall through and are popped below if conv_name is not None: key[1] = conv_name new_predictor_keys.append('.'.join(key)) else: ori_predictor_keys.pop(-1) for i in range(len(new_predictor_keys)): state_dict[new_predictor_keys[i]] = state_dict.pop( ori_predictor_keys[i]) super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually contains classification scores and bbox predictions. cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_points * 4. """ return multi_apply(self.forward_single, feats)[:2] def forward_single(self, x): """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. 
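
# `multi_apply`, used by `forward` above, maps a per-level function over
# the feature levels and transposes the per-level result tuples into
# tuples of per-level lists. Roughly (simplified from mmdet.core):
from functools import partial

def multi_apply(func, *args, **kwargs):
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)
    return tuple(map(list, zip(*map_results)))

# Toy usage: one (score, pred) pair per "level" becomes
# ([scores...], [preds...]).
def fake_forward_single(x):
    return x * 2, x * 3

scores, preds = multi_apply(fake_forward_single, [1, 2, 4])
assert scores == [2, 4, 8] and preds == [3, 6, 12]
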
Returns: tuple: Scores for each class, bbox predictions, features after classification and regression conv layers; some models (e.g. FCOS) need these features. """ cls_feat = x reg_feat = x for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) cls_score = self.conv_cls(cls_feat) for reg_layer in self.reg_convs: reg_feat = reg_layer(reg_feat) bbox_pred = self.conv_reg(reg_feat) return cls_score, bbox_pred, cls_feat, reg_feat @abstractmethod @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute loss of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_points * 4. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. """ raise NotImplementedError @abstractmethod def get_targets(self, points, gt_bboxes_list, gt_labels_list): """Compute regression, classification and centerness targets for points in multiple images. Args: points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). gt_labels_list (list[Tensor]): Ground truth labels of each box, each has shape (num_gt,). """ raise NotImplementedError def _get_points_single(self, featmap_size, stride, dtype, device, flatten=False): """Get points of a single scale level. This function will be deprecated soon. """ warnings.warn( '`_get_points_single` in `AnchorFreeHead` will be ' 'deprecated soon, we support a multi level point generator now, ' 'you can get points of a single level feature map ' 'with `self.prior_generator.single_level_grid_priors` ') h, w = featmap_size # First create the range with the default dtype, then convert to # the target `dtype` for onnx exporting. x_range = torch.arange(w, device=device).to(dtype) y_range = torch.arange(h, device=device).to(dtype) y, x = torch.meshgrid(y_range, x_range) if flatten: y = y.flatten() x = x.flatten() return y, x def get_points(self, featmap_sizes, dtype, device, flatten=False): """Get points according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. dtype (torch.dtype): Type of points. device (torch.device): Device of points. Returns: tuple: points of each image. """ warnings.warn( '`get_points` in `AnchorFreeHead` will be ' 'deprecated soon, we support a multi level point generator now, ' 'you can get points of all levels ' 'with `self.prior_generator.grid_priors` ') mlvl_points = [] for i in range(len(featmap_sizes)): mlvl_points.append( self._get_points_single(featmap_sizes[i], self.strides[i], dtype, device, flatten)) return mlvl_points def aug_test(self, feats, img_metas, rescale=False): """Test function with test time augmentation. Args: feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) 
and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[ndarray]: bbox results of each class """ return self.aug_test_bboxes(feats, img_metas, rescale=rescale) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/anchor_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings import torch import torch.nn as nn from mmcv.runner import force_fp32 from mmdet.core import (anchor_inside_flags, build_assigner, build_bbox_coder, build_prior_generator, build_sampler, images_to_levels, multi_apply, unmap) from ..builder import HEADS, build_loss from .base_dense_head import BaseDenseHead from .dense_test_mixins import BBoxTestMixin @HEADS.register_module() class AnchorHead(BaseDenseHead, BBoxTestMixin): """Anchor-based head (RPN, RetinaNet, SSD, etc.). Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. Used in child classes. anchor_generator (dict): Config dict for anchor generator bbox_coder (dict): Config of bounding box coder. reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Default False. It should be `True` when using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. loss_cls (dict): Config of classification loss. loss_bbox (dict): Config of localization loss. train_cfg (dict): Training config of anchor head. test_cfg (dict): Testing config of anchor head. init_cfg (dict or list[dict], optional): Initialization config dict. 
""" # noqa: W605 def __init__(self, num_classes, in_channels, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', scales=[8, 16, 32], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', clip_border=True, target_means=(.0, .0, .0, .0), target_stds=(1.0, 1.0, 1.0, 1.0)), reg_decoded_bbox=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), train_cfg=None, test_cfg=None, init_cfg=dict(type='Normal', layer='Conv2d', std=0.01)): super(AnchorHead, self).__init__(init_cfg) self.in_channels = in_channels self.num_classes = num_classes self.feat_channels = feat_channels self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) if self.use_sigmoid_cls: self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 if self.cls_out_channels <= 0: raise ValueError(f'num_classes={num_classes} is too small') self.reg_decoded_bbox = reg_decoded_bbox self.bbox_coder = build_bbox_coder(bbox_coder) self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) if hasattr(self.train_cfg, 'sampler') and self.train_cfg.sampler.type.split( '.')[-1] != 'PseudoSampler': self.sampling = True sampler_cfg = self.train_cfg.sampler # avoid BC-breaking if loss_cls['type'] in [ 'FocalLoss', 'GHMC', 'QualityFocalLoss' ]: warnings.warn( 'DeprecationWarning: Determining whether to sampling' 'by loss type is deprecated, please delete sampler in' 'your config when using `FocalLoss`, `GHMC`, ' '`QualityFocalLoss` or other FocalLoss variant.') self.sampling = False sampler_cfg = dict(type='PseudoSampler') else: self.sampling = False sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.fp16_enabled = False self.prior_generator = build_prior_generator(anchor_generator) # Usually the numbers of anchors for each level are the same # except SSD detectors. So it is an int in the most dense # heads but a list of int in SSDHead self.num_base_priors = self.prior_generator.num_base_priors[0] self._init_layers() @property def num_anchors(self): warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' 'for consistency or also use ' '`num_base_priors` instead') return self.prior_generator.num_base_priors[0] @property def anchor_generator(self): warnings.warn('DeprecationWarning: anchor_generator is deprecated, ' 'please use "prior_generator" instead') return self.prior_generator def _init_layers(self): """Initialize layers of the head.""" self.conv_cls = nn.Conv2d(self.in_channels, self.num_base_priors * self.cls_out_channels, 1) self.conv_reg = nn.Conv2d(self.in_channels, self.num_base_priors * 4, 1) def forward_single(self, x): """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. Returns: tuple: cls_score (Tensor): Cls scores for a single scale level \ the channels number is num_base_priors * num_classes. bbox_pred (Tensor): Box energies / deltas for a single scale \ level, the channels number is num_base_priors * 4. """ cls_score = self.conv_cls(x) bbox_pred = self.conv_reg(x) return cls_score, bbox_pred def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. 
Returns: tuple: A tuple of classification scores and bbox prediction. - cls_scores (list[Tensor]): Classification scores for all \ scale levels, each is a 4D-tensor, the channels number \ is num_base_priors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all \ scale levels, each is a 4D-tensor, the channels number \ is num_base_priors * 4. """ return multi_apply(self.forward_single, feats) def get_anchors(self, featmap_sizes, img_metas, device='cuda'): """Get anchors according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. img_metas (list[dict]): Image meta info. device (torch.device | str): Device for returned tensors Returns: tuple: anchor_list (list[Tensor]): Anchors of each image. valid_flag_list (list[Tensor]): Valid flags of each image. """ num_imgs = len(img_metas) # since feature map sizes of all images are the same, we only compute # anchors for one time multi_level_anchors = self.prior_generator.grid_priors( featmap_sizes, device=device) anchor_list = [multi_level_anchors for _ in range(num_imgs)] # for each image, we compute valid flags of multi level anchors valid_flag_list = [] for img_id, img_meta in enumerate(img_metas): multi_level_flags = self.prior_generator.valid_flags( featmap_sizes, img_meta['pad_shape'], device) valid_flag_list.append(multi_level_flags) return anchor_list, valid_flag_list def _get_targets_single(self, flat_anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True): """Compute regression and classification targets for anchors in a single image. Args: flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors ,4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors,). gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). img_meta (dict): Meta info of the image. gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. 
Returns: tuple: labels_list (list[Tensor]): Labels of each level label_weights_list (list[Tensor]): Label weights of each level bbox_targets_list (list[Tensor]): BBox targets of each level bbox_weights_list (list[Tensor]): BBox weights of each level num_total_pos (int): Number of positive samples in all images num_total_neg (int): Number of negative samples in all images """ inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) if not inside_flags.any(): return (None, ) * 7 # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] assign_result = self.assigner.assign( anchors, gt_bboxes, gt_bboxes_ignore, None if self.sampling else gt_labels) sampling_result = self.sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: if not self.reg_decoded_bbox: pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) else: pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class since v2.5.0 labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) # fill bg label label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds, sampling_result) def get_targets(self, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True, return_sampling_results=False): """Compute regression and classification targets for anchors in multiple images. Args: anchor_list (list[list[Tensor]]): Multi level anchors of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, 4). valid_flag_list (list[list[Tensor]]): Multi level valid flags of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, ) gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be ignored. gt_labels_list (list[Tensor]): Ground truth labels of each box. label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: Usually returns a tuple containing learning targets. 
- labels_list (list[Tensor]): Labels of each level. - label_weights_list (list[Tensor]): Label weights of each level. - bbox_targets_list (list[Tensor]): BBox targets of each level. - bbox_weights_list (list[Tensor]): BBox weights of each level. - num_total_pos (int): Number of positive samples in all images. - num_total_neg (int): Number of negative samples in all images. additional_returns: This function enables user-defined returns from `self._get_targets_single`. These returns are currently refined to properties at each feature map (i.e. having HxW dimension). The results will be concatenated after the end """ num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors to a single tensor concat_anchor_list = [] concat_valid_flag_list = [] for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) concat_anchor_list.append(torch.cat(anchor_list[i])) concat_valid_flag_list.append(torch.cat(valid_flag_list[i])) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] results = multi_apply( self._get_targets_single, concat_anchor_list, concat_valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs) (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list, sampling_results_list) = results[:7] rest_results = list(results[7:]) # user-added return values # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. multiple levels labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) res = (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) if return_sampling_results: res = res + (sampling_results_list, ) for i, r in enumerate(rest_results): # user-added return values rest_results[i] = images_to_levels(r, num_level_anchors) return res + tuple(rest_results) def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights, bbox_targets, bbox_weights, num_total_samples): """Compute loss of a single scale level. Args: cls_score (Tensor): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W). bbox_pred (Tensor): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). bbox_weights (Tensor): BBox regression loss weights of each anchor with shape (N, num_total_anchors, 4). 
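
# Sketch of what `images_to_levels` (used by `get_targets` above) does:
# per-image tensors over all concatenated anchors are stacked, then
# re-split into per-level tensors using the per-level anchor counts.
# A simplified illustration, not the exact mmdet source.
import torch

num_level_anchors = [6, 2]                             # e.g. two FPN levels
per_image = [torch.arange(8), torch.arange(8) + 100]   # two images

stacked = torch.stack(per_image, 0)    # (num_imgs, total_anchors)
levels, start = [], 0
for n in num_level_anchors:
    levels.append(stacked[:, start:start + n])
    start += n
assert [lvl.shape[1] for lvl in levels] == num_level_anchors
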
num_total_samples (int): If sampling, num total samples equal to the number of total anchors; Otherwise, it is the number of positive anchors. Returns: dict[str, Tensor]: A dictionary of loss components. """ # classification loss labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=num_total_samples) # regression loss bbox_targets = bbox_targets.reshape(-1, 4) bbox_weights = bbox_weights.reshape(-1, 4) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) if self.reg_decoded_bbox: # When the regression loss (e.g. `IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, it # decodes the already encoded coordinates to absolute format. anchors = anchors.reshape(-1, 4) bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) loss_bbox = self.loss_bbox( bbox_pred, bbox_targets, bbox_weights, avg_factor=num_total_samples) return loss_cls, loss_bbox @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Default: None Returns: dict[str, Tensor]: A dictionary of loss components. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = ( num_total_pos + num_total_neg if self.sampling else num_total_pos) # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor concat_anchor_list = [] for i in range(len(anchor_list)): concat_anchor_list.append(torch.cat(anchor_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) losses_cls, losses_bbox = multi_apply( self.loss_single, cls_scores, bbox_preds, all_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_samples=num_total_samples) return dict(loss_cls=losses_cls, loss_bbox=losses_bbox) def aug_test(self, feats, img_metas, rescale=False): """Test function with test time augmentation. Args: feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. 
img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. Each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is a 2-tuple. The first item is ``bboxes`` with shape (n, 5), where the 5 values are (tl_x, tl_y, br_x, br_y, score). The second tensor in the tuple is ``labels`` with shape (n,). The length of the list should always be 1. """ return self.aug_test_bboxes(feats, img_metas, rescale=rescale) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/atss_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.cnn import ConvModule, Scale from mmcv.runner import force_fp32 from mmdet.core import (anchor_inside_flags, build_assigner, build_sampler, images_to_levels, multi_apply, reduce_mean, unmap) from ..builder import HEADS, build_loss from .anchor_head import AnchorHead @HEADS.register_module() class ATSSHead(AnchorHead): """Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection. The ATSS head structure is similar to FCOS; however, ATSS uses anchor boxes and assigns labels by Adaptive Training Sample Selection instead of max IoU. https://arxiv.org/abs/1912.02424 """ def __init__(self, num_classes, in_channels, pred_kernel_size=3, stacked_convs=4, conv_cfg=None, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), reg_decoded_bbox=True, loss_centerness=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='atss_cls', std=0.01, bias_prob=0.01)), **kwargs): self.pred_kernel_size = pred_kernel_size self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg super(ATSSHead, self).__init__( num_classes, in_channels, reg_decoded_bbox=reg_decoded_bbox, init_cfg=init_cfg, **kwargs) self.sampling = False if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # ATSS does not sample, so use PseudoSampler sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.loss_centerness = build_loss(loss_centerness) def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) pred_pad_size = self.pred_kernel_size // 2 self.atss_cls = nn.Conv2d( self.feat_channels, self.num_anchors * self.cls_out_channels, self.pred_kernel_size, padding=pred_pad_size) self.atss_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, self.pred_kernel_size, padding=pred_pad_size) self.atss_centerness = nn.Conv2d( self.feat_channels, self.num_base_priors * 1, self.pred_kernel_size, padding=pred_pad_size) self.scales = nn.ModuleList( [Scale(1.0) for _ in self.prior_generator.strides]) def forward(self, feats): """Forward features from the upstream network. 
Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox predictions cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_anchors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_anchors * 4. """ return multi_apply(self.forward_single, feats, self.scales) def forward_single(self, x, scale): """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. Returns: tuple: cls_score (Tensor): Cls scores for a single scale level the channels number is num_anchors * num_classes. bbox_pred (Tensor): Box energies / deltas for a single scale level, the channels number is num_anchors * 4. centerness (Tensor): Centerness for a single scale level, the channel number is (N, num_anchors * 1, H, W). """ cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.atss_cls(cls_feat) # following ATSS, we do not apply exp to bbox_pred bbox_pred = scale(self.atss_reg(reg_feat)).float() centerness = self.atss_centerness(reg_feat) return cls_score, bbox_pred, centerness def loss_single(self, anchors, cls_score, bbox_pred, centerness, labels, label_weights, bbox_targets, num_total_samples): """Compute loss of a single scale level. Args: cls_score (Tensor): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W). bbox_pred (Tensor): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). labels (Tensor): Labels of each anchor with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor with shape (N, num_total_anchors, 4). num_total_samples (int): Number of positive samples that is reduced over all GPUs. Returns: dict[str, Tensor]: A dictionary of loss components. 
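
# The per-level `Scale` modules used in `forward_single` above are just
# learnable scalar multipliers; roughly equivalent to the following
# (simplified from mmcv.cnn.Scale):
import torch
import torch.nn as nn

class Scale(nn.Module):
    def __init__(self, scale=1.0):
        super().__init__()
        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))

    def forward(self, x):
        return x * self.scale

scales = nn.ModuleList([Scale(1.0) for _ in range(5)])  # one per FPN level
out = scales[0](torch.randn(1, 4, 8, 8))                # same shape, scaled
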
""" anchors = anchors.reshape(-1, 4) cls_score = cls_score.permute(0, 2, 3, 1).reshape( -1, self.cls_out_channels).contiguous() bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) centerness = centerness.permute(0, 2, 3, 1).reshape(-1) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) # classification loss loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=num_total_samples) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_centerness = centerness[pos_inds] centerness_targets = self.centerness_target( pos_anchors, pos_bbox_targets) pos_decode_bbox_pred = self.bbox_coder.decode( pos_anchors, pos_bbox_pred) # regression loss loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_bbox_targets, weight=centerness_targets, avg_factor=1.0) # centerness loss loss_centerness = self.loss_centerness( pos_centerness, centerness_targets, avg_factor=num_total_samples) else: loss_bbox = bbox_pred.sum() * 0 loss_centerness = centerness.sum() * 0 centerness_targets = bbox_targets.new_tensor(0.) return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum() @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) def loss(self, cls_scores, bbox_preds, centernesses, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) centernesses (list[Tensor]): Centerness for each scale level with shape (N, num_anchors * 1, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = reduce_mean( torch.tensor(num_total_pos, dtype=torch.float, device=device)).item() num_total_samples = max(num_total_samples, 1.0) losses_cls, losses_bbox, loss_centerness,\ bbox_avg_factor = multi_apply( self.loss_single, anchor_list, cls_scores, bbox_preds, centernesses, labels_list, label_weights_list, bbox_targets_list, num_total_samples=num_total_samples) bbox_avg_factor = sum(bbox_avg_factor) bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item() losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox)) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_centerness=loss_centerness) def centerness_target(self, anchors, gts): # only calculate pos centerness targets, otherwise there may be nan anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2 anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2 l_ = anchors_cx - gts[:, 0] t_ = anchors_cy - gts[:, 1] r_ = gts[:, 2] - anchors_cx b_ = gts[:, 3] - anchors_cy left_right = torch.stack([l_, r_], dim=1) top_bottom = torch.stack([t_, b_], dim=1) centerness = torch.sqrt( (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])) assert not torch.isnan(centerness).any() return centerness def get_targets(self, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True): """Get targets for ATSS head. This method is almost the same as `AnchorHead.get_targets()`. Besides returning the targets as the parent method does, it also returns the anchors as the first element of the returned tuple. 
""" num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] num_level_anchors_list = [num_level_anchors] * num_imgs # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list[i] = torch.cat(anchor_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( self._get_target_single, anchor_list, valid_flag_list, num_level_anchors_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors) labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) def _get_target_single(self, flat_anchors, valid_flags, num_level_anchors, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True): """Compute regression, classification targets for anchors in a single image. Args: flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors ,4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors,). num_level_anchors Tensor): Number of anchors of each scale level. gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). img_meta (dict): Meta info of the image. label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: N is the number of total anchors in the image. labels (Tensor): Labels of all anchors in the image with shape (N,). label_weights (Tensor): Label weights of all anchor in the image with shape (N,). bbox_targets (Tensor): BBox targets of all anchors in the image with shape (N, 4). bbox_weights (Tensor): BBox weights of all anchors in the image with shape (N, 4) pos_inds (Tensor): Indices of positive anchor with shape (num_pos,). neg_inds (Tensor): Indices of negative anchor with shape (num_neg,). 
""" inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) if not inside_flags.any(): return (None, ) * 7 # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] num_level_anchors_inside = self.get_num_level_anchors_inside( num_level_anchors, inside_flags) assign_result = self.assigner.assign(anchors, num_level_anchors_inside, gt_bboxes, gt_bboxes_ignore, gt_labels) sampling_result = self.sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: if self.reg_decoded_bbox: pos_bbox_targets = sampling_result.pos_gt_bboxes else: pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class since v2.5.0 labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) anchors = unmap(anchors, num_total_anchors, inside_flags) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (anchors, labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) def get_num_level_anchors_inside(self, num_level_anchors, inside_flags): split_inside_flags = torch.split(inside_flags, num_level_anchors) num_level_anchors_inside = [ int(flags.sum()) for flags in split_inside_flags ] return num_level_anchors_inside ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/autoassign_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import bias_init_with_prob, normal_init from mmcv.runner import force_fp32 from mmdet.core import multi_apply from mmdet.core.anchor.point_generator import MlvlPointGenerator from mmdet.core.bbox import bbox_overlaps from mmdet.models import HEADS from mmdet.models.dense_heads.atss_head import reduce_mean from mmdet.models.dense_heads.fcos_head import FCOSHead from mmdet.models.dense_heads.paa_head import levels_to_images EPS = 1e-12 class CenterPrior(nn.Module): """Center Weighting module to adjust the category-specific prior distributions. Args: force_topk (bool): When no point falls into gt_bbox, forcibly select the k points closest to the center to calculate the center prior. Defaults to False. topk (int): The number of points used to calculate the center prior when no point falls in gt_bbox. Only work when force_topk if True. Defaults to 9. num_classes (int): The class number of dataset. Defaults to 80. 
strides (tuple[int]): The stride of each input feature map. Defaults to (8, 16, 32, 64, 128). """ def __init__(self, force_topk=False, topk=9, num_classes=80, strides=(8, 16, 32, 64, 128)): super(CenterPrior, self).__init__() self.mean = nn.Parameter(torch.zeros(num_classes, 2)) self.sigma = nn.Parameter(torch.ones(num_classes, 2)) self.strides = strides self.force_topk = force_topk self.topk = topk def forward(self, anchor_points_list, gt_bboxes, labels, inside_gt_bbox_mask): """Get the center prior of each point on the feature map for each instance. Args: anchor_points_list (list[Tensor]): list of coordinates of points on the feature map. Each with shape (num_points, 2). gt_bboxes (Tensor): The gt_bboxes with shape of (num_gt, 4). labels (Tensor): The gt_labels with shape of (num_gt,). inside_gt_bbox_mask (Tensor): Tensor of bool type, with shape of (num_points, num_gt), each value is used to mark whether this point falls within a certain gt. Returns: tuple(Tensor): - center_prior_weights(Tensor): Float tensor with shape \ of (num_points, num_gt). Each value represents \ the center weighting coefficient. - inside_gt_bbox_mask (Tensor): Tensor of bool type, \ with shape of (num_points, num_gt), each \ value is used to mark whether this point falls \ within a certain gt or is the topk nearest points for \ a specific gt_bbox. """ inside_gt_bbox_mask = inside_gt_bbox_mask.clone() num_gts = len(labels) num_points = sum([len(item) for item in anchor_points_list]) if num_gts == 0: return gt_bboxes.new_zeros(num_points, num_gts), inside_gt_bbox_mask center_prior_list = [] for slvl_points, stride in zip(anchor_points_list, self.strides): # slvl_points: points from single level in FPN, has shape (h*w, 2) # single_level_points has shape (h*w, num_gt, 2) single_level_points = slvl_points[:, None, :].expand( (slvl_points.size(0), len(gt_bboxes), 2)) gt_center_x = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2) gt_center_y = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2) gt_center = torch.stack((gt_center_x, gt_center_y), dim=1) gt_center = gt_center[None] # instance_center has shape (1, num_gt, 2) instance_center = self.mean[labels][None] # instance_sigma has shape (1, num_gt, 2) instance_sigma = self.sigma[labels][None] # distance has shape (num_points, num_gt, 2) distance = (((single_level_points - gt_center) / float(stride) - instance_center)**2) center_prior = torch.exp(-distance / (2 * instance_sigma**2)).prod(dim=-1) center_prior_list.append(center_prior) center_prior_weights = torch.cat(center_prior_list, dim=0) if self.force_topk: gt_inds_no_points_inside = torch.nonzero( inside_gt_bbox_mask.sum(0) == 0).reshape(-1) if gt_inds_no_points_inside.numel(): topk_center_index = \ center_prior_weights[:, gt_inds_no_points_inside].topk( self.topk, dim=0)[1] temp_mask = inside_gt_bbox_mask[:, gt_inds_no_points_inside] inside_gt_bbox_mask[:, gt_inds_no_points_inside] = \ torch.scatter(temp_mask, dim=0, index=topk_center_index, src=torch.ones_like( topk_center_index, dtype=torch.bool)) center_prior_weights[~inside_gt_bbox_mask] = 0 return center_prior_weights, inside_gt_bbox_mask @HEADS.register_module() class AutoAssignHead(FCOSHead): """AutoAssignHead used in AutoAssign. More details can be found in the `paper <https://arxiv.org/abs/2007.03496>`_. Args: force_topk (bool): Used in center prior initialization to handle extremely small gt. Default is False. topk (int): The number of points used to calculate the center prior when no point falls in gt_bbox. Only works when force_topk is True. Defaults to 9.
pos_loss_weight (float): The loss weight of positive loss, with default value 0.25. neg_loss_weight (float): The loss weight of negative loss, with default value 0.75. center_loss_weight (float): The loss weight of center prior loss, with default value 0.75. """ def __init__(self, *args, force_topk=False, topk=9, pos_loss_weight=0.25, neg_loss_weight=0.75, center_loss_weight=0.75, **kwargs): super().__init__(*args, conv_bias=True, **kwargs) self.center_prior = CenterPrior( force_topk=force_topk, topk=topk, num_classes=self.num_classes, strides=self.strides) self.pos_loss_weight = pos_loss_weight self.neg_loss_weight = neg_loss_weight self.center_loss_weight = center_loss_weight self.prior_generator = MlvlPointGenerator(self.strides, offset=0) def init_weights(self): """Initialize weights of the head. In particular, we have special initialization for the classification conv's and regression conv's bias. """ super(AutoAssignHead, self).init_weights() bias_cls = bias_init_with_prob(0.02) normal_init(self.conv_cls, std=0.01, bias=bias_cls) normal_init(self.conv_reg, std=0.01, bias=4.0) def forward_single(self, x, scale, stride): """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. stride (int): The corresponding stride for feature maps, only used to normalize the bbox prediction when self.norm_on_bbox is True. Returns: tuple: scores for each class, bbox predictions and centerness \ predictions of input feature maps. """ cls_score, bbox_pred, cls_feat, reg_feat = super( FCOSHead, self).forward_single(x) centerness = self.conv_centerness(reg_feat) # scale the bbox_pred of different level # float to avoid overflow when enabling FP16 bbox_pred = scale(bbox_pred).float() # bbox_pred needed for gradient computation has been modified # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace # F.relu(bbox_pred) with bbox_pred.clamp(min=0) bbox_pred = bbox_pred.clamp(min=0) bbox_pred *= stride return cls_score, bbox_pred, centerness def get_pos_loss_single(self, cls_score, objectness, reg_loss, gt_labels, center_prior_weights): """Calculate the positive loss of all points in gt_bboxes. Args: cls_score (Tensor): All category scores for each point on the feature map. The shape is (num_points, num_class). objectness (Tensor): Foreground probability of all points, has shape (num_points, 1). reg_loss (Tensor): The regression loss of each gt_bbox and each prediction box, has shape of (num_points, num_gt). gt_labels (Tensor): The zero-based gt_labels of all gt with shape of (num_gt,). center_prior_weights (Tensor): Float tensor with shape of (num_points, num_gt). Each value represents the center weighting coefficient. Returns: tuple[Tensor]: - pos_loss (Tensor): The positive loss of all points in the gt_bboxes. """ # p_loc: localization confidence p_loc = torch.exp(-reg_loss) # p_cls: classification confidence p_cls = (cls_score * objectness)[:, gt_labels] # p_pos: joint confidence indicator p_pos = p_cls * p_loc # 3 is a hyper-parameter to control the contributions of high and # low confidence locations towards positive losses.
confidence_weight = torch.exp(p_pos * 3) p_pos_weight = (confidence_weight * center_prior_weights) / ( (confidence_weight * center_prior_weights).sum( 0, keepdim=True)).clamp(min=EPS) reweighted_p_pos = (p_pos * p_pos_weight).sum(0) pos_loss = F.binary_cross_entropy( reweighted_p_pos, torch.ones_like(reweighted_p_pos), reduction='none') pos_loss = pos_loss.sum() * self.pos_loss_weight return pos_loss, def get_neg_loss_single(self, cls_score, objectness, gt_labels, ious, inside_gt_bbox_mask): """Calculate the negative loss of all points in the feature map. Args: cls_score (Tensor): All category scores for each point on the feature map. The shape is (num_points, num_class). objectness (Tensor): Foreground probability of all points and has shape of (num_points, 1). gt_labels (Tensor): The zero-based label of all gt with shape of (num_gt). ious (Tensor): Float tensor with shape of (num_points, num_gt). Each value represents the IoU of pred_bbox and gt_bboxes. inside_gt_bbox_mask (Tensor): Tensor of bool type, with shape of (num_points, num_gt), each value is used to mark whether this point falls within a certain gt. Returns: tuple[Tensor]: - neg_loss (Tensor): The negative loss of all points in the feature map. """ num_gts = len(gt_labels) joint_conf = (cls_score * objectness) p_neg_weight = torch.ones_like(joint_conf) if num_gts > 0: # the order of dimensions would affect the value of # p_neg_weight, we strictly follow the original # implementation. inside_gt_bbox_mask = inside_gt_bbox_mask.permute(1, 0) ious = ious.permute(1, 0) foreground_idxs = torch.nonzero(inside_gt_bbox_mask, as_tuple=True) temp_weight = (1 / (1 - ious[foreground_idxs]).clamp_(EPS)) def normalize(x): return (x - x.min() + EPS) / (x.max() - x.min() + EPS) for instance_idx in range(num_gts): idxs = foreground_idxs[0] == instance_idx if idxs.any(): temp_weight[idxs] = normalize(temp_weight[idxs]) p_neg_weight[foreground_idxs[1], gt_labels[foreground_idxs[0]]] = 1 - temp_weight logits = (joint_conf * p_neg_weight) neg_loss = ( logits**2 * F.binary_cross_entropy( logits, torch.zeros_like(logits), reduction='none')) neg_loss = neg_loss.sum() * self.neg_loss_weight return neg_loss, @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'objectnesses')) def loss(self, cls_scores, bbox_preds, objectnesses, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute loss of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_points * 4. objectnesses (list[Tensor]): objectness for each scale level, each is a 4D-tensor, the channel number is num_points * 1. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components.
""" assert len(cls_scores) == len(bbox_preds) == len(objectnesses) all_num_gt = sum([len(item) for item in gt_bboxes]) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] all_level_points = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) inside_gt_bbox_mask_list, bbox_targets_list = self.get_targets( all_level_points, gt_bboxes) center_prior_weight_list = [] temp_inside_gt_bbox_mask_list = [] for gt_bboxe, gt_label, inside_gt_bbox_mask in zip( gt_bboxes, gt_labels, inside_gt_bbox_mask_list): center_prior_weight, inside_gt_bbox_mask = \ self.center_prior(all_level_points, gt_bboxe, gt_label, inside_gt_bbox_mask) center_prior_weight_list.append(center_prior_weight) temp_inside_gt_bbox_mask_list.append(inside_gt_bbox_mask) inside_gt_bbox_mask_list = temp_inside_gt_bbox_mask_list mlvl_points = torch.cat(all_level_points, dim=0) bbox_preds = levels_to_images(bbox_preds) cls_scores = levels_to_images(cls_scores) objectnesses = levels_to_images(objectnesses) reg_loss_list = [] ious_list = [] num_points = len(mlvl_points) for bbox_pred, encoded_targets, inside_gt_bbox_mask in zip( bbox_preds, bbox_targets_list, inside_gt_bbox_mask_list): temp_num_gt = encoded_targets.size(1) expand_mlvl_points = mlvl_points[:, None, :].expand( num_points, temp_num_gt, 2).reshape(-1, 2) encoded_targets = encoded_targets.reshape(-1, 4) expand_bbox_pred = bbox_pred[:, None, :].expand( num_points, temp_num_gt, 4).reshape(-1, 4) decoded_bbox_preds = self.bbox_coder.decode( expand_mlvl_points, expand_bbox_pred) decoded_target_preds = self.bbox_coder.decode( expand_mlvl_points, encoded_targets) with torch.no_grad(): ious = bbox_overlaps( decoded_bbox_preds, decoded_target_preds, is_aligned=True) ious = ious.reshape(num_points, temp_num_gt) if temp_num_gt: ious = ious.max( dim=-1, keepdim=True).values.repeat(1, temp_num_gt) else: ious = ious.new_zeros(num_points, temp_num_gt) ious[~inside_gt_bbox_mask] = 0 ious_list.append(ious) loss_bbox = self.loss_bbox( decoded_bbox_preds, decoded_target_preds, weight=None, reduction_override='none') reg_loss_list.append(loss_bbox.reshape(num_points, temp_num_gt)) cls_scores = [item.sigmoid() for item in cls_scores] objectnesses = [item.sigmoid() for item in objectnesses] pos_loss_list, = multi_apply(self.get_pos_loss_single, cls_scores, objectnesses, reg_loss_list, gt_labels, center_prior_weight_list) pos_avg_factor = reduce_mean( bbox_pred.new_tensor(all_num_gt)).clamp_(min=1) pos_loss = sum(pos_loss_list) / pos_avg_factor neg_loss_list, = multi_apply(self.get_neg_loss_single, cls_scores, objectnesses, gt_labels, ious_list, inside_gt_bbox_mask_list) neg_avg_factor = sum(item.data.sum() for item in center_prior_weight_list) neg_avg_factor = reduce_mean(neg_avg_factor).clamp_(min=1) neg_loss = sum(neg_loss_list) / neg_avg_factor center_loss = [] for i in range(len(img_metas)): if inside_gt_bbox_mask_list[i].any(): center_loss.append( len(gt_bboxes[i]) / center_prior_weight_list[i].sum().clamp_(min=EPS)) # when width or height of gt_bbox is smaller than stride of p3 else: center_loss.append(center_prior_weight_list[i].sum() * 0) center_loss = torch.stack(center_loss).mean() * self.center_loss_weight # avoid dead lock in DDP if all_num_gt == 0: pos_loss = bbox_preds[0].sum() * 0 dummy_center_prior_loss = self.center_prior.mean.sum( ) * 0 + self.center_prior.sigma.sum() * 0 center_loss = objectnesses[0].sum() * 0 + dummy_center_prior_loss loss = dict( loss_pos=pos_loss, loss_neg=neg_loss, loss_center=center_loss) return 
loss def get_targets(self, points, gt_bboxes_list): """Compute regression targets and each point inside or outside gt_bbox in multiple images. Args: points (list[Tensor]): Points of all fpn level, each has shape (num_points, 2). gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). Returns: tuple(list[Tensor]): - inside_gt_bbox_mask_list (list[Tensor]): Each Tensor is of bool type with shape of (num_points, num_gt), each value is used to mark whether this point falls within a certain gt. - concat_lvl_bbox_targets (list[Tensor]): BBox targets of each level. Each tensor has shape (num_points, num_gt, 4). """ concat_points = torch.cat(points, dim=0) # the number of points per img, per lvl inside_gt_bbox_mask_list, bbox_targets_list = multi_apply( self._get_target_single, gt_bboxes_list, points=concat_points) return inside_gt_bbox_mask_list, bbox_targets_list def _get_target_single(self, gt_bboxes, points): """Compute regression targets and each point inside or outside gt_bbox for a single image. Args: gt_bboxes (Tensor): gt_bbox of single image, has shape (num_gt, 4). points (Tensor): Points of all fpn level, has shape (num_points, 2). Returns: tuple[Tensor]: Containing the following Tensors: - inside_gt_bbox_mask (Tensor): Bool tensor with shape (num_points, num_gt), each value is used to mark whether this point falls within a certain gt. - bbox_targets (Tensor): BBox targets of each point with each gt_bbox, has shape (num_points, num_gt, 4). """ num_points = points.size(0) num_gts = gt_bboxes.size(0) gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) xs, ys = points[:, 0], points[:, 1] xs = xs[:, None] ys = ys[:, None] left = xs - gt_bboxes[..., 0] right = gt_bboxes[..., 2] - xs top = ys - gt_bboxes[..., 1] bottom = gt_bboxes[..., 3] - ys bbox_targets = torch.stack((left, top, right, bottom), -1) if num_gts: inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 else: inside_gt_bbox_mask = bbox_targets.new_zeros((num_points, num_gts), dtype=torch.bool) return inside_gt_bbox_mask, bbox_targets def _get_points_single(self, featmap_size, stride, dtype, device, flatten=False): """Almost the same as the implementation in FCOS, we remove half stride offset to align with the original implementation. This function will be deprecated soon. """ warnings.warn( '`_get_points_single` in `AutoAssignHead` will be ' 'deprecated soon, we support a multi level point generator now, ' 'you can get points of a single level feature map ' 'with `self.prior_generator.single_level_grid_priors` ') y, x = super(FCOSHead, self)._get_points_single(featmap_size, stride, dtype, device) points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride), dim=-1) return points ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/base_dense_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved.
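# Illustrative usage sketch (not part of the upstream file): the class below
# defines the test-time contract shared by all dense heads. Assuming a concrete
# subclass such as RetinaHead has been built, and `feats` / `img_metas` come
# from the detector (hypothetical names), the flow is roughly:
#
#     outs = head(feats)  # e.g. (cls_scores, bbox_preds) for anchor-based heads
#     results = head.get_bboxes(*outs, img_metas=img_metas, rescale=True)
#     det_bboxes, det_labels = results[0]  # per image: (n, 5) boxes, (n,) labels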
from abc import ABCMeta, abstractmethod import torch from mmcv.cnn.utils.weight_init import constant_init from mmcv.ops import batched_nms from mmcv.runner import BaseModule, force_fp32 from mmdet.core.utils import filter_scores_and_topk, select_single_mlvl class BaseDenseHead(BaseModule, metaclass=ABCMeta): """Base class for DenseHeads.""" def __init__(self, init_cfg=None): super(BaseDenseHead, self).__init__(init_cfg) def init_weights(self): super(BaseDenseHead, self).init_weights() # avoid init_cfg overwrite the initialization of `conv_offset` for m in self.modules(): # DeformConv2dPack, ModulatedDeformConv2dPack if hasattr(m, 'conv_offset'): constant_init(m.conv_offset, 0) @abstractmethod def loss(self, **kwargs): """Compute losses of the head.""" pass @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def get_bboxes(self, cls_scores, bbox_preds, score_factors=None, img_metas=None, cfg=None, rescale=False, with_nms=True, **kwargs): """Transform network outputs of a batch into bbox results. Note: When score_factors is not None, the cls_scores are usually multiplied by it then obtain the real score used in NMS, such as CenterNess in FCOS, IoU branch in ATSS. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Default None. img_metas (list[dict], Optional): Image meta info. Default None. cfg (mmcv.Config, Optional): Test / postprocessing configuration, if None, test_cfg would be used. Default None. rescale (bool): If True, return boxes in original image space. Default False. with_nms (bool): If True, do nms before return boxes. Default True. Returns: list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is an (n, 5) tensor, where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. The second item is a (n,) tensor where each item is the predicted class label of the corresponding box. """ assert len(cls_scores) == len(bbox_preds) if score_factors is None: # e.g. Retina, FreeAnchor, Foveabox, etc. with_score_factors = False else: # e.g. FCOS, PAA, ATSS, AutoAssign, etc. with_score_factors = True assert len(cls_scores) == len(score_factors) num_levels = len(cls_scores) featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, dtype=cls_scores[0].dtype, device=cls_scores[0].device) result_list = [] for img_id in range(len(img_metas)): img_meta = img_metas[img_id] cls_score_list = select_single_mlvl(cls_scores, img_id) bbox_pred_list = select_single_mlvl(bbox_preds, img_id) if with_score_factors: score_factor_list = select_single_mlvl(score_factors, img_id) else: score_factor_list = [None for _ in range(num_levels)] results = self._get_bboxes_single(cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors, img_meta, cfg, rescale, with_nms, **kwargs) result_list.append(results) return result_list def _get_bboxes_single(self, cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors, img_meta, cfg, rescale=False, with_nms=True, **kwargs): """Transform outputs of a single image into bbox predictions. 
Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image, each item has shape (num_priors * 1, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid. In all anchor-based methods, it has shape (num_priors, 4). In all anchor-free methods, it has shape (num_priors, 2) when `with_stride=True`, otherwise it still has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before returning boxes. Default: True. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually `with_nms=False` is used for aug test. If with_nms is True, then return the following format: - det_bboxes (Tensor): Predicted bboxes with shape \ [num_bboxes, 5], where the first 4 columns are bounding \ box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ column is a score between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding \ box with shape [num_bboxes]. """ if score_factor_list[0] is None: # e.g. Retina, FreeAnchor, etc. with_score_factors = False else: # e.g. FCOS, PAA, ATSS, etc. with_score_factors = True cfg = self.test_cfg if cfg is None else cfg img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_labels = [] if with_score_factors: mlvl_score_factors = [] else: mlvl_score_factors = None for level_idx, (cls_score, bbox_pred, score_factor, priors) in \ enumerate(zip(cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) if with_score_factors: score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid() cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class scores = cls_score.softmax(-1)[:, :-1] # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before.
results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict(bbox_pred=bbox_pred, priors=priors)) scores, labels, keep_idxs, filtered_results = results bbox_pred = filtered_results['bbox_pred'] priors = filtered_results['priors'] if with_score_factors: score_factor = score_factor[keep_idxs] bboxes = self.bbox_coder.decode( priors, bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_labels.append(labels) if with_score_factors: mlvl_score_factors.append(score_factor) return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes, img_meta['scale_factor'], cfg, rescale, with_nms, mlvl_score_factors, **kwargs) def _bbox_post_process(self, mlvl_scores, mlvl_labels, mlvl_bboxes, scale_factor, cfg, rescale=False, with_nms=True, mlvl_score_factors=None, **kwargs): """bbox post-processing method. The boxes are rescaled to the original image scale and NMS is applied. Usually `with_nms=False` is used for aug test. Args: mlvl_scores (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_bboxes, ). mlvl_labels (list[Tensor]): Box class labels from all scale levels of a single image, each item has shape (num_bboxes, ). mlvl_bboxes (list[Tensor]): Decoded bboxes from all scale levels of a single image, each item has shape (num_bboxes, 4). scale_factor (ndarray, optional): Scale factor of the image arranged as (w_scale, h_scale, w_scale, h_scale). cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before returning boxes. Default: True. mlvl_score_factors (list[Tensor], optional): Score factor from all scale levels of a single image, each item has shape (num_bboxes, ). Default: None. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually `with_nms=False` is used for aug test. If with_nms is True, then return the following format: - det_bboxes (Tensor): Predicted bboxes with shape \ [num_bboxes, 5], where the first 4 columns are bounding \ box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ column is a score between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding \ box with shape [num_bboxes]. """ assert len(mlvl_scores) == len(mlvl_bboxes) == len(mlvl_labels) mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) mlvl_labels = torch.cat(mlvl_labels) if mlvl_score_factors is not None: # TODO: Add sqrt operation in order to be consistent with # the paper. mlvl_score_factors = torch.cat(mlvl_score_factors) mlvl_scores = mlvl_scores * mlvl_score_factors if with_nms: if mlvl_bboxes.numel() == 0: det_bboxes = torch.cat([mlvl_bboxes, mlvl_scores[:, None]], -1) return det_bboxes, mlvl_labels det_bboxes, keep_idxs = batched_nms(mlvl_bboxes, mlvl_scores, mlvl_labels, cfg.nms) det_bboxes = det_bboxes[:cfg.max_per_img] det_labels = mlvl_labels[keep_idxs][:cfg.max_per_img] return det_bboxes, det_labels else: return mlvl_bboxes, mlvl_scores, mlvl_labels def forward_train(self, x, img_metas, gt_bboxes, gt_labels=None, gt_bboxes_ignore=None, proposal_cfg=None, **kwargs): """ Args: x (list[Tensor]): Features from FPN.
img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). proposal_cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. Returns: tuple: losses (dict[str, Tensor]): A dictionary of loss components. proposal_list (list[Tensor]): Proposals of each image. """ outs = self(x) if gt_labels is None: loss_inputs = outs + (gt_bboxes, img_metas) else: loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) if proposal_cfg is None: return losses else: proposal_list = self.get_bboxes( *outs, img_metas=img_metas, cfg=proposal_cfg) return losses, proposal_list def simple_test(self, feats, img_metas, rescale=False): """Test function without test-time augmentation. Args: feats (tuple[torch.Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is ``bboxes`` with shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). The shape of the second tensor in the tuple is ``labels`` with shape (n, ). """ return self.simple_test_bboxes(feats, img_metas, rescale=rescale) @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def onnx_export(self, cls_scores, bbox_preds, score_factors=None, img_metas=None, with_nms=True): """Transform network output for a batch into bbox predictions. Args: cls_scores (list[Tensor]): Box scores for each scale level with shape (N, num_points * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_points * 4, H, W). score_factors (list[Tensor]): score_factors for each scale level with shape (N, num_points * 1, H, W). Default: None. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. Default: None. with_nms (bool): Whether to apply nms to the bboxes. Default: True. Returns: tuple[Tensor, Tensor] | list[tuple]: When `with_nms` is True, it is tuple[Tensor, Tensor], first tensor bboxes with shape [N, num_det, 5], 5 arranged as (x1, y1, x2, y2, score) and second element is class labels of shape [N, num_det]. When `with_nms` is False, first tensor is bboxes with shape [N, num_det, 4], second tensor is the raw score with shape [N, num_det, num_classes]. """ assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)] mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)] assert len( img_metas ) == 1, 'Only support one input image while exporting to ONNX' img_shape = img_metas[0]['img_shape_for_onnx'] cfg = self.test_cfg assert len(cls_scores) == len(bbox_preds) == len(mlvl_priors) device = cls_scores[0].device batch_size = cls_scores[0].shape[0] # convert to tensor to keep tracing nms_pre_tensor = torch.tensor( cfg.get('nms_pre', -1), device=device, dtype=torch.long) # e.g. Retina, FreeAnchor, etc.
if score_factors is None: with_score_factors = False mlvl_score_factor = [None for _ in range(num_levels)] else: # e.g. FCOS, PAA, ATSS, etc. with_score_factors = True mlvl_score_factor = [ score_factors[i].detach() for i in range(num_levels) ] mlvl_score_factors = [] mlvl_batch_bboxes = [] mlvl_scores = [] for cls_score, bbox_pred, score_factors, priors in zip( mlvl_cls_scores, mlvl_bbox_preds, mlvl_score_factor, mlvl_priors): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] scores = cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1, self.cls_out_channels) if self.use_sigmoid_cls: scores = scores.sigmoid() nms_pre_score = scores else: scores = scores.softmax(-1) nms_pre_score = scores if with_score_factors: score_factors = score_factors.permute(0, 2, 3, 1).reshape( batch_size, -1).sigmoid() bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 4) priors = priors.expand(batch_size, -1, priors.size(-1)) # Get top-k predictions from mmdet.core.export import get_k_for_topk nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1]) if nms_pre > 0: if with_score_factors: nms_pre_score = (nms_pre_score * score_factors[..., None]) else: nms_pre_score = nms_pre_score # Get maximum scores for foreground classes. if self.use_sigmoid_cls: max_scores, _ = nms_pre_score.max(-1) else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class max_scores, _ = nms_pre_score[..., :-1].max(-1) _, topk_inds = max_scores.topk(nms_pre) batch_inds = torch.arange( batch_size, device=bbox_pred.device).view( -1, 1).expand_as(topk_inds).long() # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 transformed_inds = bbox_pred.shape[1] * batch_inds + topk_inds priors = priors.reshape( -1, priors.size(-1))[transformed_inds, :].reshape( batch_size, -1, priors.size(-1)) bbox_pred = bbox_pred.reshape(-1, 4)[transformed_inds, :].reshape( batch_size, -1, 4) scores = scores.reshape( -1, self.cls_out_channels)[transformed_inds, :].reshape( batch_size, -1, self.cls_out_channels) if with_score_factors: score_factors = score_factors.reshape( -1, 1)[transformed_inds].reshape(batch_size, -1) bboxes = self.bbox_coder.decode( priors, bbox_pred, max_shape=img_shape) mlvl_batch_bboxes.append(bboxes) mlvl_scores.append(scores) if with_score_factors: mlvl_score_factors.append(score_factors) batch_bboxes = torch.cat(mlvl_batch_bboxes, dim=1) batch_scores = torch.cat(mlvl_scores, dim=1) if with_score_factors: batch_score_factors = torch.cat(mlvl_score_factors, dim=1) # Replace multiclass_nms with ONNX::NonMaxSuppression in deployment from mmdet.core.export import add_dummy_nms_for_onnx if not self.use_sigmoid_cls: batch_scores = batch_scores[..., :self.num_classes] if with_score_factors: batch_scores = batch_scores * (batch_score_factors.unsqueeze(2)) if with_nms: max_output_boxes_per_class = cfg.nms.get( 'max_output_boxes_per_class', 200) iou_threshold = cfg.nms.get('iou_threshold', 0.5) score_threshold = cfg.score_thr nms_pre = cfg.get('deploy_nms_pre', -1) return add_dummy_nms_for_onnx(batch_bboxes, batch_scores, max_output_boxes_per_class, iou_threshold, score_threshold, nms_pre, cfg.max_per_img) else: return batch_bboxes, batch_scores ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/base_mask_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
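# Illustrative sketch (not part of the upstream file) of how a detector is
# expected to drive the BaseMaskHead interface defined below; `mask_head`,
# `x`, `feats` and the gt_* inputs are assumed to come from the detector:
#
#     losses = mask_head.forward_train(x, gt_labels, gt_masks, img_metas)
#     results_list = mask_head.simple_test(feats, img_metas, rescale=True)
#     # each item of results_list is an InstanceData with scores/labels/masks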
from abc import ABCMeta, abstractmethod from mmcv.runner import BaseModule class BaseMaskHead(BaseModule, metaclass=ABCMeta): """Base class for mask heads used in One-Stage Instance Segmentation.""" def __init__(self, init_cfg): super(BaseMaskHead, self).__init__(init_cfg) @abstractmethod def loss(self, **kwargs): pass @abstractmethod def get_results(self, **kwargs): """Get processed :obj:`InstanceData` of multiple images.""" pass def forward_train(self, x, gt_labels, gt_masks, img_metas, gt_bboxes=None, gt_bboxes_ignore=None, positive_infos=None, **kwargs): """ Args: x (list[Tensor] | tuple[Tensor]): Features from FPN. Each has a shape (B, C, H, W). gt_labels (list[Tensor]): Ground truth labels of all images, each has a shape (num_gts,). gt_masks (list[Tensor]): Masks for each bbox, has a shape (num_gts, h, w). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes (list[Tensor]): Ground truth bboxes of the image, each item has a shape (num_gts, 4). gt_bboxes_ignore (list[Tensor], None): Ground truth bboxes to be ignored, each item has a shape (num_ignored_gts, 4). positive_infos (list[:obj:`InstanceData`], optional): Information of positive samples. Used when the label assignment is done outside the MaskHead, e.g., in BboxHead in YOLACT or CondInst, etc. When the label assignment is done in MaskHead, it would be None, like SOLO. All values in it should have shape (num_positive_samples, *). Returns: dict[str, Tensor]: A dictionary of loss components. """ if positive_infos is None: outs = self(x) else: outs = self(x, positive_infos) assert isinstance(outs, tuple), 'Forward results should be a tuple, ' \ 'even if only one item is returned' loss = self.loss( *outs, gt_labels=gt_labels, gt_masks=gt_masks, img_metas=img_metas, gt_bboxes=gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, positive_infos=positive_infos, **kwargs) return loss def simple_test(self, feats, img_metas, rescale=False, instances_list=None, **kwargs): """Test function without test-time augmentation. Args: feats (tuple[torch.Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. instances_list (list[:obj:`InstanceData`], optional): Detection results of each image after the post process. Only exists if there is a `bbox_head`, like `YOLACT`, `CondInst`, etc. Returns: list[:obj:`InstanceData`]: Instance segmentation \ results of each image after the post process. \ Each item usually contains the following keys. \ - scores (Tensor): Classification scores, has a shape (num_instance,) - labels (Tensor): Has a shape (num_instances,). - masks (Tensor): Processed mask results, has a shape (num_instances, h, w). """ if instances_list is None: outs = self(feats) else: outs = self(feats, instances_list=instances_list) mask_inputs = outs + (img_metas, ) results_list = self.get_results( *mask_inputs, rescale=rescale, instances_list=instances_list, **kwargs) return results_list def onnx_export(self, img, img_metas): raise NotImplementedError(f'{self.__class__.__name__} does ' f'not support ONNX EXPORT') ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/cascade_rpn_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved.
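# Illustrative sketch (not part of the upstream file) of the two AdaptiveConv
# modes defined below; channel sizes here are assumptions for illustration:
#
#     conv = AdaptiveConv(256, 256, type='dilation')  # plain dilated 3x3 conv
#     y = conv(x, offset=None)                        # offset must be None
#
#     conv = AdaptiveConv(256, 256, type='offset')    # DeformConv2d variant
#     y = conv(x, offset)  # offset: (N, H*W, 18), e.g. from anchor_offset()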
from __future__ import division import copy import warnings import torch import torch.nn as nn from mmcv import ConfigDict from mmcv.ops import DeformConv2d, batched_nms from mmcv.runner import BaseModule, ModuleList from mmdet.core import (RegionAssigner, build_assigner, build_sampler, images_to_levels, multi_apply) from mmdet.core.utils import select_single_mlvl from ..builder import HEADS, build_head from .base_dense_head import BaseDenseHead from .rpn_head import RPNHead class AdaptiveConv(BaseModule): """AdaptiveConv used to adapt the sampling location with the anchors. Args: in_channels (int): Number of channels in the input image out_channels (int): Number of channels produced by the convolution kernel_size (int or tuple): Size of the conv kernel. Default: 3 stride (int or tuple, optional): Stride of the convolution. Default: 1 padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 1 dilation (int or tuple, optional): Spacing between kernel elements. Default: 3 groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1 bias (bool, optional): If set True, adds a learnable bias to the output. Default: False. type (str, optional): Type of adaptive conv, can be either 'offset' (arbitrary anchors) or 'dilation' (uniform anchor). Default: 'dilation'. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=3, groups=1, bias=False, type='dilation', init_cfg=dict( type='Normal', std=0.01, override=dict(name='conv'))): super(AdaptiveConv, self).__init__(init_cfg) assert type in ['offset', 'dilation'] self.adapt_type = type assert kernel_size == 3, 'Adaptive conv only supports kernels 3' if self.adapt_type == 'offset': assert stride == 1 and padding == 1 and groups == 1, \ 'Adaptive conv offset mode only supports padding: {1}, ' \ f'stride: {1}, groups: {1}' self.conv = DeformConv2d( in_channels, out_channels, kernel_size, padding=padding, stride=stride, groups=groups, bias=bias) else: self.conv = nn.Conv2d( in_channels, out_channels, kernel_size, padding=dilation, dilation=dilation) def forward(self, x, offset): """Forward function.""" if self.adapt_type == 'offset': N, _, H, W = x.shape assert offset is not None assert H * W == offset.shape[1] # reshape [N, NA, 18] to (N, 18, H, W) offset = offset.permute(0, 2, 1).reshape(N, -1, H, W) offset = offset.contiguous() x = self.conv(x, offset) else: assert offset is None x = self.conv(x) return x @HEADS.register_module() class StageCascadeRPNHead(RPNHead): """Stage of CascadeRPNHead. Args: in_channels (int): Number of channels in the input feature map. anchor_generator (dict): anchor generator config. adapt_cfg (dict): adaptation config. bridged_feature (bool, optional): whether update rpn feature. Default: False. with_cls (bool, optional): whether use classification branch. Default: True. sampling (bool, optional): whether use sampling. Default: True. init_cfg (dict or list[dict], optional): Initialization config dict. 
Default: None """ def __init__(self, in_channels, anchor_generator=dict( type='AnchorGenerator', scales=[8], ratios=[1.0], strides=[4, 8, 16, 32, 64]), adapt_cfg=dict(type='dilation', dilation=3), bridged_feature=False, with_cls=True, sampling=True, init_cfg=None, **kwargs): self.with_cls = with_cls self.anchor_strides = anchor_generator['strides'] self.anchor_scales = anchor_generator['scales'] self.bridged_feature = bridged_feature self.adapt_cfg = adapt_cfg super(StageCascadeRPNHead, self).__init__( in_channels, anchor_generator=anchor_generator, init_cfg=init_cfg, **kwargs) # override sampling and sampler self.sampling = sampling if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # use PseudoSampler when sampling is False if self.sampling and hasattr(self.train_cfg, 'sampler'): sampler_cfg = self.train_cfg.sampler else: sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) if init_cfg is None: self.init_cfg = dict( type='Normal', std=0.01, override=[dict(name='rpn_reg')]) if self.with_cls: self.init_cfg['override'].append(dict(name='rpn_cls')) def _init_layers(self): """Init layers of a CascadeRPN stage.""" self.rpn_conv = AdaptiveConv(self.in_channels, self.feat_channels, **self.adapt_cfg) if self.with_cls: self.rpn_cls = nn.Conv2d(self.feat_channels, self.num_anchors * self.cls_out_channels, 1) self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1) self.relu = nn.ReLU(inplace=True) def forward_single(self, x, offset): """Forward function of single scale.""" bridged_x = x x = self.relu(self.rpn_conv(x, offset)) if self.bridged_feature: bridged_x = x # update feature cls_score = self.rpn_cls(x) if self.with_cls else None bbox_pred = self.rpn_reg(x) return bridged_x, cls_score, bbox_pred def forward(self, feats, offset_list=None): """Forward function.""" if offset_list is None: offset_list = [None for _ in range(len(feats))] return multi_apply(self.forward_single, feats, offset_list) def _region_targets_single(self, anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, featmap_sizes, label_channels=1): """Get anchor targets based on region for single level.""" assign_result = self.assigner.assign( anchors, valid_flags, gt_bboxes, img_meta, featmap_sizes, self.anchor_scales[0], self.anchor_strides, gt_bboxes_ignore=gt_bboxes_ignore, gt_labels=None, allowed_border=self.train_cfg.allowed_border) flat_anchors = torch.cat(anchors) sampling_result = self.sampler.sample(assign_result, flat_anchors, gt_bboxes) num_anchors = flat_anchors.shape[0] bbox_targets = torch.zeros_like(flat_anchors) bbox_weights = torch.zeros_like(flat_anchors) labels = flat_anchors.new_zeros(num_anchors, dtype=torch.long) label_weights = flat_anchors.new_zeros(num_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: if not self.reg_decoded_bbox: pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) else: pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 if gt_labels is None: labels[pos_inds] = 1 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) def 
region_targets(self, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, featmap_sizes, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True): """See :func:`StageCascadeRPNHead.get_targets`.""" num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( self._region_targets_single, anchor_list, valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, featmap_sizes=featmap_sizes, label_channels=label_channels) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. multiple levels labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) return (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) def get_targets(self, anchor_list, valid_flag_list, gt_bboxes, img_metas, featmap_sizes, gt_bboxes_ignore=None, label_channels=1): """Compute regression and classification targets for anchors. Args: anchor_list (list[list]): Multi level anchors of each image. valid_flag_list (list[list]): Multi level valid flags of each image. gt_bboxes (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. featmap_sizes (list[Tensor]): Feature map size of each level. gt_bboxes_ignore (list[Tensor]): Ignore bboxes of each image. label_channels (int): Channel of label. Returns: cls_reg_targets (tuple) """ if isinstance(self.assigner, RegionAssigner): cls_reg_targets = self.region_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, featmap_sizes, gt_bboxes_ignore_list=gt_bboxes_ignore, label_channels=label_channels) else: cls_reg_targets = super(StageCascadeRPNHead, self).get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, label_channels=label_channels) return cls_reg_targets def anchor_offset(self, anchor_list, anchor_strides, featmap_sizes): """Get offset for deformable conv based on anchor shape. NOTE: currently supports deformable kernel_size=3 and dilation=1 Args: anchor_list (list[list[Tensor]]): [NI, NLVL, NA, 4] list of multi-level anchors anchor_strides (list[int]): anchor stride of each level Returns: offset_list (list[Tensor]): [NLVL, NA, 2, 18]: offset of DeformConv kernel.
""" def _shape_offset(anchors, stride, ks=3, dilation=1): # currently support kernel_size=3 and dilation=1 assert ks == 3 and dilation == 1 pad = (ks - 1) // 2 idx = torch.arange(-pad, pad + 1, dtype=dtype, device=device) yy, xx = torch.meshgrid(idx, idx) # return order matters xx = xx.reshape(-1) yy = yy.reshape(-1) w = (anchors[:, 2] - anchors[:, 0]) / stride h = (anchors[:, 3] - anchors[:, 1]) / stride w = w / (ks - 1) - dilation h = h / (ks - 1) - dilation offset_x = w[:, None] * xx # (NA, ks**2) offset_y = h[:, None] * yy # (NA, ks**2) return offset_x, offset_y def _ctr_offset(anchors, stride, featmap_size): feat_h, feat_w = featmap_size assert len(anchors) == feat_h * feat_w x = (anchors[:, 0] + anchors[:, 2]) * 0.5 y = (anchors[:, 1] + anchors[:, 3]) * 0.5 # compute centers on feature map x = x / stride y = y / stride # compute predefine centers xx = torch.arange(0, feat_w, device=anchors.device) yy = torch.arange(0, feat_h, device=anchors.device) yy, xx = torch.meshgrid(yy, xx) xx = xx.reshape(-1).type_as(x) yy = yy.reshape(-1).type_as(y) offset_x = x - xx # (NA, ) offset_y = y - yy # (NA, ) return offset_x, offset_y num_imgs = len(anchor_list) num_lvls = len(anchor_list[0]) dtype = anchor_list[0][0].dtype device = anchor_list[0][0].device num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] offset_list = [] for i in range(num_imgs): mlvl_offset = [] for lvl in range(num_lvls): c_offset_x, c_offset_y = _ctr_offset(anchor_list[i][lvl], anchor_strides[lvl], featmap_sizes[lvl]) s_offset_x, s_offset_y = _shape_offset(anchor_list[i][lvl], anchor_strides[lvl]) # offset = ctr_offset + shape_offset offset_x = s_offset_x + c_offset_x[:, None] offset_y = s_offset_y + c_offset_y[:, None] # offset order (y0, x0, y1, x2, .., y8, x8, y9, x9) offset = torch.stack([offset_y, offset_x], dim=-1) offset = offset.reshape(offset.size(0), -1) # [NA, 2*ks**2] mlvl_offset.append(offset) offset_list.append(torch.cat(mlvl_offset)) # [totalNA, 2*ks**2] offset_list = images_to_levels(offset_list, num_level_anchors) return offset_list def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights, bbox_targets, bbox_weights, num_total_samples): """Loss function on single scale.""" # classification loss if self.with_cls: labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=num_total_samples) # regression loss bbox_targets = bbox_targets.reshape(-1, 4) bbox_weights = bbox_weights.reshape(-1, 4) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) if self.reg_decoded_bbox: # When the regression loss (e.g. `IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, it # decodes the already encoded coordinates to absolute format. anchors = anchors.reshape(-1, 4) bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) loss_reg = self.loss_bbox( bbox_pred, bbox_targets, bbox_weights, avg_factor=num_total_samples) if self.with_cls: return loss_cls, loss_reg return None, loss_reg def loss(self, anchor_list, valid_flag_list, cls_scores, bbox_preds, gt_bboxes, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: anchor_list (list[list]): Multi level anchors of each image. 
cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Default: None Returns: dict[str, Tensor]: A dictionary of loss components. """ featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds] label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, featmap_sizes, gt_bboxes_ignore=gt_bboxes_ignore, label_channels=label_channels) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets if self.sampling: num_total_samples = num_total_pos + num_total_neg else: # 200 is hard-coded average factor, # which follows guided anchoring. num_total_samples = sum([label.numel() for label in labels_list]) / 200.0 # change per image, per level anchor_list to per_level, per_image mlvl_anchor_list = list(zip(*anchor_list)) # concat mlvl_anchor_list mlvl_anchor_list = [ torch.cat(anchors, dim=0) for anchors in mlvl_anchor_list ] losses = multi_apply( self.loss_single, cls_scores, bbox_preds, mlvl_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_samples=num_total_samples) if self.with_cls: return dict(loss_rpn_cls=losses[0], loss_rpn_reg=losses[1]) return dict(loss_rpn_reg=losses[1]) def get_bboxes(self, anchor_list, cls_scores, bbox_preds, img_metas, cfg, rescale=False): """Get proposal predict. Args: anchor_list (list[list]): Multi level anchors of each image. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). img_metas (list[dict], Optional): Image meta info. Default None. cfg (mmcv.Config, Optional): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. Returns: Tensor: Labeled boxes in shape (n, 5), where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. """ assert len(cls_scores) == len(bbox_preds) result_list = [] for img_id in range(len(img_metas)): cls_score_list = select_single_mlvl(cls_scores, img_id) bbox_pred_list = select_single_mlvl(bbox_preds, img_id) img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list, anchor_list[img_id], img_shape, scale_factor, cfg, rescale) result_list.append(proposals) return result_list def _get_bboxes_single(self, cls_scores, bbox_preds, mlvl_anchors, img_shape, scale_factor, cfg, rescale=False): """Transform outputs of a single image into bbox predictions. Args: cls_scores (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_anchors * num_classes, H, W). 
bbox_preds (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_anchors * 4, H, W). mlvl_anchors (list[Tensor]): Box reference from all scale levels of a single image, each item has shape (num_total_anchors, 4). img_shape (tuple[int]): Shape of the input image, (height, width, 3). scale_factor (ndarray): Scale factor of the image arranged as (w_scale, h_scale, w_scale, h_scale). cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default False. Returns: Tensor: Labeled boxes in shape (n, 5), where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. """ cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) # bboxes from different levels should be independent during NMS, # level_ids are used as labels for batched NMS to separate them level_ids = [] mlvl_scores = [] mlvl_bbox_preds = [] mlvl_valid_anchors = [] nms_pre = cfg.get('nms_pre', -1) for idx in range(len(cls_scores)): rpn_cls_score = cls_scores[idx] rpn_bbox_pred = bbox_preds[idx] assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] rpn_cls_score = rpn_cls_score.permute(1, 2, 0) if self.use_sigmoid_cls: rpn_cls_score = rpn_cls_score.reshape(-1) scores = rpn_cls_score.sigmoid() else: rpn_cls_score = rpn_cls_score.reshape(-1, 2) # We set FG labels to [0, num_class-1] and BG label to # num_class in RPN head since mmdet v2.5, which is unified to # be consistent with other head since mmdet v2.0. In mmdet v2.0 # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head. scores = rpn_cls_score.softmax(dim=1)[:, 0] rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4) anchors = mlvl_anchors[idx] if 0 < nms_pre < scores.shape[0]: # sort is faster than topk # _, topk_inds = scores.topk(cfg.nms_pre) ranked_scores, rank_inds = scores.sort(descending=True) topk_inds = rank_inds[:nms_pre] scores = ranked_scores[:nms_pre] rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] anchors = anchors[topk_inds, :] mlvl_scores.append(scores) mlvl_bbox_preds.append(rpn_bbox_pred) mlvl_valid_anchors.append(anchors) level_ids.append( scores.new_full((scores.size(0), ), idx, dtype=torch.long)) scores = torch.cat(mlvl_scores) anchors = torch.cat(mlvl_valid_anchors) rpn_bbox_pred = torch.cat(mlvl_bbox_preds) proposals = self.bbox_coder.decode( anchors, rpn_bbox_pred, max_shape=img_shape) ids = torch.cat(level_ids) if cfg.min_bbox_size >= 0: w = proposals[:, 2] - proposals[:, 0] h = proposals[:, 3] - proposals[:, 1] valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size) if not valid_mask.all(): proposals = proposals[valid_mask] scores = scores[valid_mask] ids = ids[valid_mask] # deprecate arguments warning if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: warnings.warn( 'In rpn_proposal or test_cfg, ' 'nms_thr has been moved to a dict named nms as ' 'iou_threshold, max_num has been renamed as max_per_img, ' 'name of original arguments and the way to specify ' 'iou_threshold of NMS will be deprecated.') if 'nms' not in cfg: cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) if 'max_num' in cfg: if 'max_per_img' in cfg: assert cfg.max_num == cfg.max_per_img, f'You ' \ f'set max_num and ' \ f'max_per_img at the same time, but get {cfg.max_num} ' \ f'and {cfg.max_per_img} respectively. ' \ 'Please delete max_num which will be deprecated.'
else: cfg.max_per_img = cfg.max_num if 'nms_thr' in cfg: assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set' \ f' iou_threshold in nms and ' \ f'nms_thr at the same time, but get' \ f' {cfg.nms.iou_threshold} and {cfg.nms_thr}' \ f' respectively. Please delete the nms_thr ' \ f'which will be deprecated.' if proposals.numel() > 0: dets, _ = batched_nms(proposals, scores, ids, cfg.nms) else: return proposals.new_zeros(0, 5) return dets[:cfg.max_per_img] def refine_bboxes(self, anchor_list, bbox_preds, img_metas): """Refine bboxes through stages.""" num_levels = len(bbox_preds) new_anchor_list = [] for img_id in range(len(img_metas)): mlvl_anchors = [] for i in range(num_levels): bbox_pred = bbox_preds[i][img_id].detach() bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) img_shape = img_metas[img_id]['img_shape'] bboxes = self.bbox_coder.decode(anchor_list[img_id][i], bbox_pred, img_shape) mlvl_anchors.append(bboxes) new_anchor_list.append(mlvl_anchors) return new_anchor_list @HEADS.register_module() class CascadeRPNHead(BaseDenseHead): """The CascadeRPNHead will predict more accurate region proposals, which is required for two-stage detectors (such as Fast/Faster R-CNN). CascadeRPN consists of a sequence of RPNStage to progressively improve the accuracy of the detected proposals. More details can be found in ``https://arxiv.org/abs/1909.06720``. Args: num_stages (int): number of CascadeRPN stages. stages (list[dict]): list of configs to build the stages. train_cfg (list[dict]): list of configs at training time each stage. test_cfg (dict): config at testing time. """ def __init__(self, num_stages, stages, train_cfg, test_cfg, init_cfg=None): super(CascadeRPNHead, self).__init__(init_cfg) assert num_stages == len(stages) self.num_stages = num_stages # Be careful! 
Pretrained weights cannot be loaded when use # nn.ModuleList self.stages = ModuleList() for i in range(len(stages)): train_cfg_i = train_cfg[i] if train_cfg is not None else None stages[i].update(train_cfg=train_cfg_i) stages[i].update(test_cfg=test_cfg) self.stages.append(build_head(stages[i])) self.train_cfg = train_cfg self.test_cfg = test_cfg def loss(self): """loss() is implemented in StageCascadeRPNHead.""" pass def get_bboxes(self): """get_bboxes() is implemented in StageCascadeRPNHead.""" pass def forward_train(self, x, img_metas, gt_bboxes, gt_labels=None, gt_bboxes_ignore=None, proposal_cfg=None): """Forward train function.""" assert gt_labels is None, 'RPN does not require gt_labels' featmap_sizes = [featmap.size()[-2:] for featmap in x] device = x[0].device anchor_list, valid_flag_list = self.stages[0].get_anchors( featmap_sizes, img_metas, device=device) losses = dict() for i in range(self.num_stages): stage = self.stages[i] if stage.adapt_cfg['type'] == 'offset': offset_list = stage.anchor_offset(anchor_list, stage.anchor_strides, featmap_sizes) else: offset_list = None x, cls_score, bbox_pred = stage(x, offset_list) rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score, bbox_pred, gt_bboxes, img_metas) stage_loss = stage.loss(*rpn_loss_inputs) for name, value in stage_loss.items(): losses['s{}.{}'.format(i, name)] = value # refine boxes if i < self.num_stages - 1: anchor_list = stage.refine_bboxes(anchor_list, bbox_pred, img_metas) if proposal_cfg is None: return losses else: proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score, bbox_pred, img_metas, self.test_cfg) return losses, proposal_list def simple_test_rpn(self, x, img_metas): """Simple forward test function.""" featmap_sizes = [featmap.size()[-2:] for featmap in x] device = x[0].device anchor_list, _ = self.stages[0].get_anchors( featmap_sizes, img_metas, device=device) for i in range(self.num_stages): stage = self.stages[i] if stage.adapt_cfg['type'] == 'offset': offset_list = stage.anchor_offset(anchor_list, stage.anchor_strides, featmap_sizes) else: offset_list = None x, cls_score, bbox_pred = stage(x, offset_list) if i < self.num_stages - 1: anchor_list = stage.refine_bboxes(anchor_list, bbox_pred, img_metas) proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score, bbox_pred, img_metas, self.test_cfg) return proposal_list def aug_test_rpn(self, x, img_metas): """Augmented forward test function.""" raise NotImplementedError( 'CascadeRPNHead does not support test-time augmentation') ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/centernet_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.cnn import bias_init_with_prob, normal_init from mmcv.ops import batched_nms from mmcv.runner import force_fp32 from mmdet.core import multi_apply from mmdet.models import HEADS, build_loss from mmdet.models.utils import gaussian_radius, gen_gaussian_target from ..utils.gaussian_target import (get_local_maximum, get_topk_from_heatmap, transpose_and_gather_feat) from .base_dense_head import BaseDenseHead from .dense_test_mixins import BBoxTestMixin @HEADS.register_module() class CenterNetHead(BaseDenseHead, BBoxTestMixin): """Objects as Points Head. CenterHead use center_point to indicate object's position. Paper link Args: in_channel (int): Number of channel in the input feature map. 
feat_channel (int): Number of channel in the intermediate feature map. num_classes (int): Number of categories excluding the background category. loss_center_heatmap (dict | None): Config of center heatmap loss. Default: GaussianFocalLoss. loss_wh (dict | None): Config of wh loss. Default: L1Loss. loss_offset (dict | None): Config of offset loss. Default: L1Loss. train_cfg (dict | None): Training config. Useless in CenterNet, but we keep this variable for SingleStageDetector. Default: None. test_cfg (dict | None): Testing config of CenterNet. Default: None. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, in_channel, feat_channel, num_classes, loss_center_heatmap=dict( type='GaussianFocalLoss', loss_weight=1.0), loss_wh=dict(type='L1Loss', loss_weight=0.1), loss_offset=dict(type='L1Loss', loss_weight=1.0), train_cfg=None, test_cfg=None, init_cfg=None): super(CenterNetHead, self).__init__(init_cfg) self.num_classes = num_classes self.heatmap_head = self._build_head(in_channel, feat_channel, num_classes) self.wh_head = self._build_head(in_channel, feat_channel, 2) self.offset_head = self._build_head(in_channel, feat_channel, 2) self.loss_center_heatmap = build_loss(loss_center_heatmap) self.loss_wh = build_loss(loss_wh) self.loss_offset = build_loss(loss_offset) self.train_cfg = train_cfg self.test_cfg = test_cfg self.fp16_enabled = False def _build_head(self, in_channel, feat_channel, out_channel): """Build head for each branch.""" layer = nn.Sequential( nn.Conv2d(in_channel, feat_channel, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(feat_channel, out_channel, kernel_size=1)) return layer def init_weights(self): """Initialize weights of the head.""" bias_init = bias_init_with_prob(0.1) self.heatmap_head[-1].bias.data.fill_(bias_init) for head in [self.wh_head, self.offset_head]: for m in head.modules(): if isinstance(m, nn.Conv2d): normal_init(m, std=0.001) def forward(self, feats): """Forward features. Notice CenterNet head does not use FPN. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: center_heatmap_preds (List[Tensor]): center predict heatmaps for all levels, the channels number is num_classes. wh_preds (List[Tensor]): wh predicts for all levels, the channels number is 2. offset_preds (List[Tensor]): offset predicts for all levels, the channels number is 2. """ return multi_apply(self.forward_single, feats) def forward_single(self, feat): """Forward feature of a single level. Args: feat (Tensor): Feature of a single level. Returns: center_heatmap_pred (Tensor): center predict heatmaps, the channels number is num_classes. wh_pred (Tensor): wh predicts, the channels number is 2. offset_pred (Tensor): offset predicts, the channels number is 2. """ center_heatmap_pred = self.heatmap_head(feat).sigmoid() wh_pred = self.wh_head(feat) offset_pred = self.offset_head(feat) return center_heatmap_pred, wh_pred, offset_pred @force_fp32(apply_to=('center_heatmap_preds', 'wh_preds', 'offset_preds')) def loss(self, center_heatmap_preds, wh_preds, offset_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: center_heatmap_preds (list[Tensor]): center predict heatmaps for all levels with shape (B, num_classes, H, W). wh_preds (list[Tensor]): wh predicts for all levels with shape (B, 2, H, W). offset_preds (list[Tensor]): offset predicts for all levels with shape (B, 2, H, W). 
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box.
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
                boxes can be ignored when computing the loss. Default: None

        Returns:
            dict[str, Tensor]: which has components below:
                - loss_center_heatmap (Tensor): loss of center heatmap.
                - loss_wh (Tensor): loss of wh heatmap.
                - loss_offset (Tensor): loss of offset heatmap.
        """
        assert len(center_heatmap_preds) == len(wh_preds) == len(
            offset_preds) == 1
        center_heatmap_pred = center_heatmap_preds[0]
        wh_pred = wh_preds[0]
        offset_pred = offset_preds[0]

        target_result, avg_factor = self.get_targets(gt_bboxes, gt_labels,
                                                     center_heatmap_pred.shape,
                                                     img_metas[0]['pad_shape'])

        center_heatmap_target = target_result['center_heatmap_target']
        wh_target = target_result['wh_target']
        offset_target = target_result['offset_target']
        wh_offset_target_weight = target_result['wh_offset_target_weight']

        # Since the channel of wh_target and offset_target is 2, the
        # avg_factor of loss_center_heatmap is always 1/2 of loss_wh and
        # loss_offset.
        loss_center_heatmap = self.loss_center_heatmap(
            center_heatmap_pred, center_heatmap_target, avg_factor=avg_factor)
        loss_wh = self.loss_wh(
            wh_pred,
            wh_target,
            wh_offset_target_weight,
            avg_factor=avg_factor * 2)
        loss_offset = self.loss_offset(
            offset_pred,
            offset_target,
            wh_offset_target_weight,
            avg_factor=avg_factor * 2)
        return dict(
            loss_center_heatmap=loss_center_heatmap,
            loss_wh=loss_wh,
            loss_offset=loss_offset)

    def get_targets(self, gt_bboxes, gt_labels, feat_shape, img_shape):
        """Compute regression and classification targets in multiple images.

        Args:
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box.
            feat_shape (list[int]): feature map shape with value [B, _, H, W]
            img_shape (list[int]): image shape in [h, w] format.

        Returns:
            tuple[dict, float]: The float value is mean avg_factor, the dict
            has components below:
                - center_heatmap_target (Tensor): targets of center heatmap, \
                    shape (B, num_classes, H, W).
                - wh_target (Tensor): targets of wh predict, shape \
                    (B, 2, H, W).
                - offset_target (Tensor): targets of offset predict, shape \
                    (B, 2, H, W).
                - wh_offset_target_weight (Tensor): weights of wh and offset \
                    predict, shape (B, 2, H, W).
""" img_h, img_w = img_shape[:2] bs, _, feat_h, feat_w = feat_shape width_ratio = float(feat_w / img_w) height_ratio = float(feat_h / img_h) center_heatmap_target = gt_bboxes[-1].new_zeros( [bs, self.num_classes, feat_h, feat_w]) wh_target = gt_bboxes[-1].new_zeros([bs, 2, feat_h, feat_w]) offset_target = gt_bboxes[-1].new_zeros([bs, 2, feat_h, feat_w]) wh_offset_target_weight = gt_bboxes[-1].new_zeros( [bs, 2, feat_h, feat_w]) for batch_id in range(bs): gt_bbox = gt_bboxes[batch_id] gt_label = gt_labels[batch_id] center_x = (gt_bbox[:, [0]] + gt_bbox[:, [2]]) * width_ratio / 2 center_y = (gt_bbox[:, [1]] + gt_bbox[:, [3]]) * height_ratio / 2 gt_centers = torch.cat((center_x, center_y), dim=1) for j, ct in enumerate(gt_centers): ctx_int, cty_int = ct.int() ctx, cty = ct scale_box_h = (gt_bbox[j][3] - gt_bbox[j][1]) * height_ratio scale_box_w = (gt_bbox[j][2] - gt_bbox[j][0]) * width_ratio radius = gaussian_radius([scale_box_h, scale_box_w], min_overlap=0.3) radius = max(0, int(radius)) ind = gt_label[j] gen_gaussian_target(center_heatmap_target[batch_id, ind], [ctx_int, cty_int], radius) wh_target[batch_id, 0, cty_int, ctx_int] = scale_box_w wh_target[batch_id, 1, cty_int, ctx_int] = scale_box_h offset_target[batch_id, 0, cty_int, ctx_int] = ctx - ctx_int offset_target[batch_id, 1, cty_int, ctx_int] = cty - cty_int wh_offset_target_weight[batch_id, :, cty_int, ctx_int] = 1 avg_factor = max(1, center_heatmap_target.eq(1).sum()) target_result = dict( center_heatmap_target=center_heatmap_target, wh_target=wh_target, offset_target=offset_target, wh_offset_target_weight=wh_offset_target_weight) return target_result, avg_factor @force_fp32(apply_to=('center_heatmap_preds', 'wh_preds', 'offset_preds')) def get_bboxes(self, center_heatmap_preds, wh_preds, offset_preds, img_metas, rescale=True, with_nms=False): """Transform network output for a batch into bbox predictions. Args: center_heatmap_preds (list[Tensor]): Center predict heatmaps for all levels with shape (B, num_classes, H, W). wh_preds (list[Tensor]): WH predicts for all levels with shape (B, 2, H, W). offset_preds (list[Tensor]): Offset predicts for all levels with shape (B, 2, H, W). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If True, return boxes in original image space. Default: True. with_nms (bool): If True, do nms before return boxes. Default: False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is an (n, 5) tensor, where 5 represent (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. The shape of the second tensor in the tuple is (n,), and each element represents the class label of the corresponding box. """ assert len(center_heatmap_preds) == len(wh_preds) == len( offset_preds) == 1 result_list = [] for img_id in range(len(img_metas)): result_list.append( self._get_bboxes_single( center_heatmap_preds[0][img_id:img_id + 1, ...], wh_preds[0][img_id:img_id + 1, ...], offset_preds[0][img_id:img_id + 1, ...], img_metas[img_id], rescale=rescale, with_nms=with_nms)) return result_list def _get_bboxes_single(self, center_heatmap_pred, wh_pred, offset_pred, img_meta, rescale=False, with_nms=True): """Transform outputs of a single image into bbox results. Args: center_heatmap_pred (Tensor): Center heatmap for current level with shape (1, num_classes, H, W). wh_pred (Tensor): WH heatmap for current level with shape (1, num_classes, H, W). 
offset_pred (Tensor): Offset for current level with shape (1, corner_offset_channels, H, W). img_meta (dict): Meta information of current image, e.g., image size, scaling factor, etc. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple[Tensor, Tensor]: The first item is an (n, 5) tensor, where 5 represent (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. The shape of the second tensor in the tuple is (n,), and each element represents the class label of the corresponding box. """ batch_det_bboxes, batch_labels = self.decode_heatmap( center_heatmap_pred, wh_pred, offset_pred, img_meta['batch_input_shape'], k=self.test_cfg.topk, kernel=self.test_cfg.local_maximum_kernel) det_bboxes = batch_det_bboxes.view([-1, 5]) det_labels = batch_labels.view(-1) batch_border = det_bboxes.new_tensor(img_meta['border'])[..., [2, 0, 2, 0]] det_bboxes[..., :4] -= batch_border if rescale: det_bboxes[..., :4] /= det_bboxes.new_tensor( img_meta['scale_factor']) if with_nms: det_bboxes, det_labels = self._bboxes_nms(det_bboxes, det_labels, self.test_cfg) return det_bboxes, det_labels def decode_heatmap(self, center_heatmap_pred, wh_pred, offset_pred, img_shape, k=100, kernel=3): """Transform outputs into detections raw bbox prediction. Args: center_heatmap_pred (Tensor): center predict heatmap, shape (B, num_classes, H, W). wh_pred (Tensor): wh predict, shape (B, 2, H, W). offset_pred (Tensor): offset predict, shape (B, 2, H, W). img_shape (list[int]): image shape in [h, w] format. k (int): Get top k center keypoints from heatmap. Default 100. kernel (int): Max pooling kernel for extract local maximum pixels. Default 3. Returns: tuple[torch.Tensor]: Decoded output of CenterNetHead, containing the following Tensors: - batch_bboxes (Tensor): Coords of each box with shape (B, k, 5) - batch_topk_labels (Tensor): Categories of each box with \ shape (B, k) """ height, width = center_heatmap_pred.shape[2:] inp_h, inp_w = img_shape center_heatmap_pred = get_local_maximum( center_heatmap_pred, kernel=kernel) *batch_dets, topk_ys, topk_xs = get_topk_from_heatmap( center_heatmap_pred, k=k) batch_scores, batch_index, batch_topk_labels = batch_dets wh = transpose_and_gather_feat(wh_pred, batch_index) offset = transpose_and_gather_feat(offset_pred, batch_index) topk_xs = topk_xs + offset[..., 0] topk_ys = topk_ys + offset[..., 1] tl_x = (topk_xs - wh[..., 0] / 2) * (inp_w / width) tl_y = (topk_ys - wh[..., 1] / 2) * (inp_h / height) br_x = (topk_xs + wh[..., 0] / 2) * (inp_w / width) br_y = (topk_ys + wh[..., 1] / 2) * (inp_h / height) batch_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=2) batch_bboxes = torch.cat((batch_bboxes, batch_scores[..., None]), dim=-1) return batch_bboxes, batch_topk_labels def _bboxes_nms(self, bboxes, labels, cfg): if labels.numel() > 0: max_num = cfg.max_per_img bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, -1].contiguous(), labels, cfg.nms) if max_num > 0: bboxes = bboxes[:max_num] labels = labels[keep][:max_num] return bboxes, labels ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/centripetal_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
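# Illustrative sketch: the decode path of CenterNetHead in centernet_head.py
# above reduces to three steps -- keep only local-maximum heatmap pixels, take
# the top-k peaks, then refine with the offset head and rescale by the output
# stride. A minimal self-contained version follows; all names in it
# (decode_centers etc.) are made up for illustration and are not mmdet API.
import torch
import torch.nn.functional as F


def decode_centers(heatmap, offset, wh, k=5, stride=4):
    """Decode k boxes from a (1, C, H, W) center heatmap, CenterNet-style."""
    pooled = F.max_pool2d(heatmap, 3, stride=1, padding=1)
    heatmap = heatmap * (pooled == heatmap)      # suppress non-peak pixels
    b, c, h, w = heatmap.shape
    scores, inds = heatmap.view(b, -1).topk(k)   # flat indices over C*H*W
    classes = torch.div(inds, h * w, rounding_mode='floor')
    ys = torch.div(inds % (h * w), w, rounding_mode='floor')
    xs = (inds % (h * w)) % w
    off = offset[0, :, ys[0], xs[0]]             # (2, k) sub-pixel offsets
    cx = (xs[0].float() + off[0]) * stride       # back to input resolution
    cy = (ys[0].float() + off[1]) * stride
    bw = wh[0, 0, ys[0], xs[0]] * stride
    bh = wh[0, 1, ys[0], xs[0]] * stride
    boxes = torch.stack(
        [cx - bw / 2, cy - bh / 2, cx + bw / 2, cy + bh / 2], dim=1)
    return boxes, scores[0], classes[0]


# toy usage, e.g. an 80-class heatmap at 1/4 resolution:
# decode_centers(torch.rand(1, 80, 32, 32).sigmoid(),
#                torch.rand(1, 2, 32, 32), torch.rand(1, 2, 32, 32))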
import torch.nn as nn
from mmcv.cnn import ConvModule, normal_init
from mmcv.ops import DeformConv2d
from mmcv.runner import force_fp32

from mmdet.core import multi_apply
from ..builder import HEADS, build_loss
from .corner_head import CornerHead


@HEADS.register_module()
class CentripetalHead(CornerHead):
    """Head of CentripetalNet: Pursuing High-quality Keypoint Pairs for Object
    Detection.

    CentripetalHead inherits from :class:`CornerHead`. It removes the
    embedding branch and adds guiding shift and centripetal shift branches.
    More details can be found in the `paper
    <https://arxiv.org/abs/2003.09119>`_ .

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        num_feat_levels (int): Levels of feature from the previous module. 2
            for HourglassNet-104 and 1 for HourglassNet-52. HourglassNet-104
            outputs the final feature and intermediate supervision feature and
            HourglassNet-52 only outputs the final feature. Default: 2.
        corner_emb_channels (int): Channel of embedding vector. Default: 1.
        train_cfg (dict | None): Training config. Useless in CornerHead,
            but we keep this variable for SingleStageDetector. Default: None.
        test_cfg (dict | None): Testing config of CornerHead. Default: None.
        loss_heatmap (dict | None): Config of corner heatmap loss. Default:
            GaussianFocalLoss.
        loss_embedding (dict | None): Config of corner embedding loss.
            Default: AssociativeEmbeddingLoss.
        loss_offset (dict | None): Config of corner offset loss. Default:
            SmoothL1Loss.
        loss_guiding_shift (dict): Config of guiding shift loss. Default:
            SmoothL1Loss.
        loss_centripetal_shift (dict): Config of centripetal shift loss.
            Default: SmoothL1Loss.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 *args,
                 centripetal_shift_channels=2,
                 guiding_shift_channels=2,
                 feat_adaption_conv_kernel=3,
                 loss_guiding_shift=dict(
                     type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
                 loss_centripetal_shift=dict(
                     type='SmoothL1Loss', beta=1.0, loss_weight=1),
                 init_cfg=None,
                 **kwargs):
        assert init_cfg is None, 'To prevent abnormal initialization ' \
            'behavior, init_cfg is not allowed to be set'
        assert centripetal_shift_channels == 2, (
            'CentripetalHead only supports centripetal_shift_channels == 2')
        self.centripetal_shift_channels = centripetal_shift_channels
        assert guiding_shift_channels == 2, (
            'CentripetalHead only supports guiding_shift_channels == 2')
        self.guiding_shift_channels = guiding_shift_channels
        self.feat_adaption_conv_kernel = feat_adaption_conv_kernel
        super(CentripetalHead, self).__init__(
            *args, init_cfg=init_cfg, **kwargs)
        self.loss_guiding_shift = build_loss(loss_guiding_shift)
        self.loss_centripetal_shift = build_loss(loss_centripetal_shift)

    def _init_centripetal_layers(self):
        """Initialize centripetal layers.

        Including feature adaption deform convs (feat_adaption), deform offset
        prediction convs (dcn_off), guiding shift (guiding_shift) and
        centripetal shift (centripetal_shift). Each branch has two parts:
        prefix `tl_` for top-left and `br_` for bottom-right.
""" self.tl_feat_adaption = nn.ModuleList() self.br_feat_adaption = nn.ModuleList() self.tl_dcn_offset = nn.ModuleList() self.br_dcn_offset = nn.ModuleList() self.tl_guiding_shift = nn.ModuleList() self.br_guiding_shift = nn.ModuleList() self.tl_centripetal_shift = nn.ModuleList() self.br_centripetal_shift = nn.ModuleList() for _ in range(self.num_feat_levels): self.tl_feat_adaption.append( DeformConv2d(self.in_channels, self.in_channels, self.feat_adaption_conv_kernel, 1, 1)) self.br_feat_adaption.append( DeformConv2d(self.in_channels, self.in_channels, self.feat_adaption_conv_kernel, 1, 1)) self.tl_guiding_shift.append( self._make_layers( out_channels=self.guiding_shift_channels, in_channels=self.in_channels)) self.br_guiding_shift.append( self._make_layers( out_channels=self.guiding_shift_channels, in_channels=self.in_channels)) self.tl_dcn_offset.append( ConvModule( self.guiding_shift_channels, self.feat_adaption_conv_kernel**2 * self.guiding_shift_channels, 1, bias=False, act_cfg=None)) self.br_dcn_offset.append( ConvModule( self.guiding_shift_channels, self.feat_adaption_conv_kernel**2 * self.guiding_shift_channels, 1, bias=False, act_cfg=None)) self.tl_centripetal_shift.append( self._make_layers( out_channels=self.centripetal_shift_channels, in_channels=self.in_channels)) self.br_centripetal_shift.append( self._make_layers( out_channels=self.centripetal_shift_channels, in_channels=self.in_channels)) def _init_layers(self): """Initialize layers for CentripetalHead. Including two parts: CornerHead layers and CentripetalHead layers """ super()._init_layers() # using _init_layers in CornerHead self._init_centripetal_layers() def init_weights(self): super(CentripetalHead, self).init_weights() for i in range(self.num_feat_levels): normal_init(self.tl_feat_adaption[i], std=0.01) normal_init(self.br_feat_adaption[i], std=0.01) normal_init(self.tl_dcn_offset[i].conv, std=0.1) normal_init(self.br_dcn_offset[i].conv, std=0.1) _ = [x.conv.reset_parameters() for x in self.tl_guiding_shift[i]] _ = [x.conv.reset_parameters() for x in self.br_guiding_shift[i]] _ = [ x.conv.reset_parameters() for x in self.tl_centripetal_shift[i] ] _ = [ x.conv.reset_parameters() for x in self.br_centripetal_shift[i] ] def forward_single(self, x, lvl_ind): """Forward feature of a single level. Args: x (Tensor): Feature of a single level. lvl_ind (int): Level index of current feature. Returns: tuple[Tensor]: A tuple of CentripetalHead's output for current feature level. Containing the following Tensors: - tl_heat (Tensor): Predicted top-left corner heatmap. - br_heat (Tensor): Predicted bottom-right corner heatmap. - tl_off (Tensor): Predicted top-left offset heatmap. - br_off (Tensor): Predicted bottom-right offset heatmap. - tl_guiding_shift (Tensor): Predicted top-left guiding shift heatmap. - br_guiding_shift (Tensor): Predicted bottom-right guiding shift heatmap. - tl_centripetal_shift (Tensor): Predicted top-left centripetal shift heatmap. - br_centripetal_shift (Tensor): Predicted bottom-right centripetal shift heatmap. 
""" tl_heat, br_heat, _, _, tl_off, br_off, tl_pool, br_pool = super( ).forward_single( x, lvl_ind, return_pool=True) tl_guiding_shift = self.tl_guiding_shift[lvl_ind](tl_pool) br_guiding_shift = self.br_guiding_shift[lvl_ind](br_pool) tl_dcn_offset = self.tl_dcn_offset[lvl_ind](tl_guiding_shift.detach()) br_dcn_offset = self.br_dcn_offset[lvl_ind](br_guiding_shift.detach()) tl_feat_adaption = self.tl_feat_adaption[lvl_ind](tl_pool, tl_dcn_offset) br_feat_adaption = self.br_feat_adaption[lvl_ind](br_pool, br_dcn_offset) tl_centripetal_shift = self.tl_centripetal_shift[lvl_ind]( tl_feat_adaption) br_centripetal_shift = self.br_centripetal_shift[lvl_ind]( br_feat_adaption) result_list = [ tl_heat, br_heat, tl_off, br_off, tl_guiding_shift, br_guiding_shift, tl_centripetal_shift, br_centripetal_shift ] return result_list @force_fp32() def loss(self, tl_heats, br_heats, tl_offs, br_offs, tl_guiding_shifts, br_guiding_shifts, tl_centripetal_shifts, br_centripetal_shifts, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each level with shape (N, guiding_shift_channels, H, W). br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for each level with shape (N, guiding_shift_channels, H, W). tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts for each level with shape (N, centripetal_shift_channels, H, W). br_centripetal_shifts (list[Tensor]): Bottom-right centripetal shifts for each level with shape (N, centripetal_shift_channels, H, W). gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [left, top, right, bottom] format. gt_labels (list[Tensor]): Class indices corresponding to each box. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. Containing the following losses: - det_loss (list[Tensor]): Corner keypoint losses of all feature levels. - off_loss (list[Tensor]): Corner offset losses of all feature levels. - guiding_loss (list[Tensor]): Guiding shift losses of all feature levels. - centripetal_loss (list[Tensor]): Centripetal shift losses of all feature levels. 
""" targets = self.get_targets( gt_bboxes, gt_labels, tl_heats[-1].shape, img_metas[0]['pad_shape'], with_corner_emb=self.with_corner_emb, with_guiding_shift=True, with_centripetal_shift=True) mlvl_targets = [targets for _ in range(self.num_feat_levels)] [det_losses, off_losses, guiding_losses, centripetal_losses ] = multi_apply(self.loss_single, tl_heats, br_heats, tl_offs, br_offs, tl_guiding_shifts, br_guiding_shifts, tl_centripetal_shifts, br_centripetal_shifts, mlvl_targets) loss_dict = dict( det_loss=det_losses, off_loss=off_losses, guiding_loss=guiding_losses, centripetal_loss=centripetal_losses) return loss_dict def loss_single(self, tl_hmp, br_hmp, tl_off, br_off, tl_guiding_shift, br_guiding_shift, tl_centripetal_shift, br_centripetal_shift, targets): """Compute losses for single level. Args: tl_hmp (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_hmp (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). br_off (Tensor): Bottom-right corner offset for current level with shape (N, corner_offset_channels, H, W). tl_guiding_shift (Tensor): Top-left guiding shift for current level with shape (N, guiding_shift_channels, H, W). br_guiding_shift (Tensor): Bottom-right guiding shift for current level with shape (N, guiding_shift_channels, H, W). tl_centripetal_shift (Tensor): Top-left centripetal shift for current level with shape (N, centripetal_shift_channels, H, W). br_centripetal_shift (Tensor): Bottom-right centripetal shift for current level with shape (N, centripetal_shift_channels, H, W). targets (dict): Corner target generated by `get_targets`. Returns: tuple[torch.Tensor]: Losses of the head's different branches containing the following losses: - det_loss (Tensor): Corner keypoint loss. - off_loss (Tensor): Corner offset loss. - guiding_loss (Tensor): Guiding shift loss. - centripetal_loss (Tensor): Centripetal shift loss. """ targets['corner_embedding'] = None det_loss, _, _, off_loss = super().loss_single(tl_hmp, br_hmp, None, None, tl_off, br_off, targets) gt_tl_guiding_shift = targets['topleft_guiding_shift'] gt_br_guiding_shift = targets['bottomright_guiding_shift'] gt_tl_centripetal_shift = targets['topleft_centripetal_shift'] gt_br_centripetal_shift = targets['bottomright_centripetal_shift'] gt_tl_heatmap = targets['topleft_heatmap'] gt_br_heatmap = targets['bottomright_heatmap'] # We only compute the offset loss at the real corner position. # The value of real corner would be 1 in heatmap ground truth. # The mask is computed in class agnostic mode and its shape is # batch * 1 * width * height. 
tl_mask = gt_tl_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_tl_heatmap) br_mask = gt_br_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_br_heatmap) # Guiding shift loss tl_guiding_loss = self.loss_guiding_shift( tl_guiding_shift, gt_tl_guiding_shift, tl_mask, avg_factor=tl_mask.sum()) br_guiding_loss = self.loss_guiding_shift( br_guiding_shift, gt_br_guiding_shift, br_mask, avg_factor=br_mask.sum()) guiding_loss = (tl_guiding_loss + br_guiding_loss) / 2.0 # Centripetal shift loss tl_centripetal_loss = self.loss_centripetal_shift( tl_centripetal_shift, gt_tl_centripetal_shift, tl_mask, avg_factor=tl_mask.sum()) br_centripetal_loss = self.loss_centripetal_shift( br_centripetal_shift, gt_br_centripetal_shift, br_mask, avg_factor=br_mask.sum()) centripetal_loss = (tl_centripetal_loss + br_centripetal_loss) / 2.0 return det_loss, off_loss, guiding_loss, centripetal_loss @force_fp32() def get_bboxes(self, tl_heats, br_heats, tl_offs, br_offs, tl_guiding_shifts, br_guiding_shifts, tl_centripetal_shifts, br_centripetal_shifts, img_metas, rescale=False, with_nms=True): """Transform network output for a batch into bbox predictions. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each level with shape (N, guiding_shift_channels, H, W). Useless in this function, we keep this arg because it's the raw output from CentripetalHead. br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for each level with shape (N, guiding_shift_channels, H, W). Useless in this function, we keep this arg because it's the raw output from CentripetalHead. tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts for each level with shape (N, centripetal_shift_channels, H, W). br_centripetal_shifts (list[Tensor]): Bottom-right centripetal shifts for each level with shape (N, centripetal_shift_channels, H, W). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. """ assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas) result_list = [] for img_id in range(len(img_metas)): result_list.append( self._get_bboxes_single( tl_heats[-1][img_id:img_id + 1, :], br_heats[-1][img_id:img_id + 1, :], tl_offs[-1][img_id:img_id + 1, :], br_offs[-1][img_id:img_id + 1, :], img_metas[img_id], tl_emb=None, br_emb=None, tl_centripetal_shift=tl_centripetal_shifts[-1][ img_id:img_id + 1, :], br_centripetal_shift=br_centripetal_shifts[-1][ img_id:img_id + 1, :], rescale=rescale, with_nms=with_nms)) return result_list ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/corner_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
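# Illustrative sketch: the masked shift losses in CentripetalHead.loss_single
# (centripetal_head.py above) all follow one pattern -- build a class-agnostic
# mask of real corner pixels (ground-truth heatmap exactly 1 in any class
# channel), zero the per-pixel loss elsewhere, and average over the corner
# count. A minimal standalone version; masked_shift_loss is a made-up name,
# not mmdet API.
import torch
import torch.nn.functional as F


def masked_shift_loss(pred_shift, gt_shift, gt_heatmap):
    # gt_heatmap: (N, num_classes, H, W) -> corner mask of shape (N, 1, H, W)
    mask = gt_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(gt_heatmap)
    loss = F.smooth_l1_loss(pred_shift, gt_shift, reduction='none')
    return (loss * mask).sum() / mask.sum().clamp(min=1)


# toy usage with a single annotated corner:
# gt_hm = torch.zeros(2, 80, 16, 16); gt_hm[0, 3, 5, 5] = 1.
# masked_shift_loss(torch.rand(2, 2, 16, 16), torch.rand(2, 2, 16, 16), gt_hm)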
from logging import warning
from math import ceil, log

import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob
from mmcv.ops import CornerPool, batched_nms
from mmcv.runner import BaseModule, force_fp32

from mmdet.core import multi_apply
from ..builder import HEADS, build_loss
from ..utils import gaussian_radius, gen_gaussian_target
from ..utils.gaussian_target import (gather_feat, get_local_maximum,
                                     get_topk_from_heatmap,
                                     transpose_and_gather_feat)
from .base_dense_head import BaseDenseHead
from .dense_test_mixins import BBoxTestMixin


class BiCornerPool(BaseModule):
    """Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.)

    Args:
        in_channels (int): Input channels of module.
        out_channels (int): Output channels of module.
        feat_channels (int): Feature channels of module.
        directions (list[str]): Directions of two CornerPools.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 in_channels,
                 directions,
                 feat_channels=128,
                 out_channels=128,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 init_cfg=None):
        super(BiCornerPool, self).__init__(init_cfg)
        self.direction1_conv = ConvModule(
            in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
        self.direction2_conv = ConvModule(
            in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)

        self.aftpool_conv = ConvModule(
            feat_channels,
            out_channels,
            3,
            padding=1,
            norm_cfg=norm_cfg,
            act_cfg=None)

        self.conv1 = ConvModule(
            in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
        self.conv2 = ConvModule(
            in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg)

        self.direction1_pool = CornerPool(directions[0])
        self.direction2_pool = CornerPool(directions[1])
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Forward features from the upstream network.

        Args:
            x (tensor): Input feature of BiCornerPool.

        Returns:
            conv2 (tensor): Output feature of BiCornerPool.
        """
        direction1_conv = self.direction1_conv(x)
        direction2_conv = self.direction2_conv(x)
        direction1_feat = self.direction1_pool(direction1_conv)
        direction2_feat = self.direction2_pool(direction2_conv)
        aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat)
        conv1 = self.conv1(x)
        relu = self.relu(aftpool_conv + conv1)
        conv2 = self.conv2(relu)
        return conv2


@HEADS.register_module()
class CornerHead(BaseDenseHead, BBoxTestMixin):
    """Head of CornerNet: Detecting Objects as Paired Keypoints.

    Code is modified from the `official github repo
    <https://github.com/princeton-vl/CornerNet>`_ .

    More details can be found in the `paper
    <https://arxiv.org/abs/1808.01244>`_ .

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        num_feat_levels (int): Levels of feature from the previous module. 2
            for HourglassNet-104 and 1 for HourglassNet-52. Because
            HourglassNet-104 outputs the final feature and intermediate
            supervision feature and HourglassNet-52 only outputs the final
            feature. Default: 2.
        corner_emb_channels (int): Channel of embedding vector. Default: 1.
        train_cfg (dict | None): Training config. Useless in CornerHead,
            but we keep this variable for SingleStageDetector. Default: None.
        test_cfg (dict | None): Testing config of CornerHead. Default: None.
        loss_heatmap (dict | None): Config of corner heatmap loss. Default:
            GaussianFocalLoss.
        loss_embedding (dict | None): Config of corner embedding loss.
            Default: AssociativeEmbeddingLoss.
        loss_offset (dict | None): Config of corner offset loss. Default:
            SmoothL1Loss.
init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, num_classes, in_channels, num_feat_levels=2, corner_emb_channels=1, train_cfg=None, test_cfg=None, loss_heatmap=dict( type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), loss_embedding=dict( type='AssociativeEmbeddingLoss', pull_weight=0.25, push_weight=0.25), loss_offset=dict( type='SmoothL1Loss', beta=1.0, loss_weight=1), init_cfg=None): assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' super(CornerHead, self).__init__(init_cfg) self.num_classes = num_classes self.in_channels = in_channels self.corner_emb_channels = corner_emb_channels self.with_corner_emb = self.corner_emb_channels > 0 self.corner_offset_channels = 2 self.num_feat_levels = num_feat_levels self.loss_heatmap = build_loss( loss_heatmap) if loss_heatmap is not None else None self.loss_embedding = build_loss( loss_embedding) if loss_embedding is not None else None self.loss_offset = build_loss( loss_offset) if loss_offset is not None else None self.train_cfg = train_cfg self.test_cfg = test_cfg self.fp16_enabled = False self._init_layers() def _make_layers(self, out_channels, in_channels=256, feat_channels=256): """Initialize conv sequential for CornerHead.""" return nn.Sequential( ConvModule(in_channels, feat_channels, 3, padding=1), ConvModule( feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None)) def _init_corner_kpt_layers(self): """Initialize corner keypoint layers. Including corner heatmap branch and corner offset branch. Each branch has two parts: prefix `tl_` for top-left and `br_` for bottom-right. """ self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList() self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList() self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList() for _ in range(self.num_feat_levels): self.tl_pool.append( BiCornerPool( self.in_channels, ['top', 'left'], out_channels=self.in_channels)) self.br_pool.append( BiCornerPool( self.in_channels, ['bottom', 'right'], out_channels=self.in_channels)) self.tl_heat.append( self._make_layers( out_channels=self.num_classes, in_channels=self.in_channels)) self.br_heat.append( self._make_layers( out_channels=self.num_classes, in_channels=self.in_channels)) self.tl_off.append( self._make_layers( out_channels=self.corner_offset_channels, in_channels=self.in_channels)) self.br_off.append( self._make_layers( out_channels=self.corner_offset_channels, in_channels=self.in_channels)) def _init_corner_emb_layers(self): """Initialize corner embedding layers. Only include corner embedding branch with two parts: prefix `tl_` for top-left and `br_` for bottom-right. """ self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList() for _ in range(self.num_feat_levels): self.tl_emb.append( self._make_layers( out_channels=self.corner_emb_channels, in_channels=self.in_channels)) self.br_emb.append( self._make_layers( out_channels=self.corner_emb_channels, in_channels=self.in_channels)) def _init_layers(self): """Initialize layers for CornerHead. Including two parts: corner keypoint layers and corner embedding layers """ self._init_corner_kpt_layers() if self.with_corner_emb: self._init_corner_emb_layers() def init_weights(self): super(CornerHead, self).init_weights() bias_init = bias_init_with_prob(0.1) for i in range(self.num_feat_levels): # The initialization of parameters are different between # nn.Conv2d and ConvModule. 
Our experiments show that # using the original initialization of nn.Conv2d increases # the final mAP by about 0.2% self.tl_heat[i][-1].conv.reset_parameters() self.tl_heat[i][-1].conv.bias.data.fill_(bias_init) self.br_heat[i][-1].conv.reset_parameters() self.br_heat[i][-1].conv.bias.data.fill_(bias_init) self.tl_off[i][-1].conv.reset_parameters() self.br_off[i][-1].conv.reset_parameters() if self.with_corner_emb: self.tl_emb[i][-1].conv.reset_parameters() self.br_emb[i][-1].conv.reset_parameters() def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of corner heatmaps, offset heatmaps and embedding heatmaps. - tl_heats (list[Tensor]): Top-left corner heatmaps for all levels, each is a 4D-tensor, the channels number is num_classes. - br_heats (list[Tensor]): Bottom-right corner heatmaps for all levels, each is a 4D-tensor, the channels number is num_classes. - tl_embs (list[Tensor] | list[None]): Top-left embedding heatmaps for all levels, each is a 4D-tensor or None. If not None, the channels number is corner_emb_channels. - br_embs (list[Tensor] | list[None]): Bottom-right embedding heatmaps for all levels, each is a 4D-tensor or None. If not None, the channels number is corner_emb_channels. - tl_offs (list[Tensor]): Top-left offset heatmaps for all levels, each is a 4D-tensor. The channels number is corner_offset_channels. - br_offs (list[Tensor]): Bottom-right offset heatmaps for all levels, each is a 4D-tensor. The channels number is corner_offset_channels. """ lvl_ind = list(range(self.num_feat_levels)) return multi_apply(self.forward_single, feats, lvl_ind) def forward_single(self, x, lvl_ind, return_pool=False): """Forward feature of a single level. Args: x (Tensor): Feature of a single level. lvl_ind (int): Level index of current feature. return_pool (bool): Return corner pool feature or not. Returns: tuple[Tensor]: A tuple of CornerHead's output for current feature level. Containing the following Tensors: - tl_heat (Tensor): Predicted top-left corner heatmap. - br_heat (Tensor): Predicted bottom-right corner heatmap. - tl_emb (Tensor | None): Predicted top-left embedding heatmap. None for `self.with_corner_emb == False`. - br_emb (Tensor | None): Predicted bottom-right embedding heatmap. None for `self.with_corner_emb == False`. - tl_off (Tensor): Predicted top-left offset heatmap. - br_off (Tensor): Predicted bottom-right offset heatmap. - tl_pool (Tensor): Top-left corner pool feature. Not must have. - br_pool (Tensor): Bottom-right corner pool feature. Not must have. """ tl_pool = self.tl_pool[lvl_ind](x) tl_heat = self.tl_heat[lvl_ind](tl_pool) br_pool = self.br_pool[lvl_ind](x) br_heat = self.br_heat[lvl_ind](br_pool) tl_emb, br_emb = None, None if self.with_corner_emb: tl_emb = self.tl_emb[lvl_ind](tl_pool) br_emb = self.br_emb[lvl_ind](br_pool) tl_off = self.tl_off[lvl_ind](tl_pool) br_off = self.br_off[lvl_ind](br_pool) result_list = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off] if return_pool: result_list.append(tl_pool) result_list.append(br_pool) return result_list def get_targets(self, gt_bboxes, gt_labels, feat_shape, img_shape, with_corner_emb=False, with_guiding_shift=False, with_centripetal_shift=False): """Generate corner targets. Including corner heatmap, corner offset. Optional: corner embedding, corner guiding shift, centripetal shift. 
For CornerNet, we generate corner heatmap, corner offset and corner embedding from this function. For CentripetalNet, we generate corner heatmap, corner offset, guiding shift and centripetal shift from this function. Args: gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). gt_labels (list[Tensor]): Ground truth labels of each box, each has shape (num_gt,). feat_shape (list[int]): Shape of output feature, [batch, channel, height, width]. img_shape (list[int]): Shape of input image, [height, width, channel]. with_corner_emb (bool): Generate corner embedding target or not. Default: False. with_guiding_shift (bool): Generate guiding shift target or not. Default: False. with_centripetal_shift (bool): Generate centripetal shift target or not. Default: False. Returns: dict: Ground truth of corner heatmap, corner offset, corner embedding, guiding shift and centripetal shift. Containing the following keys: - topleft_heatmap (Tensor): Ground truth top-left corner heatmap. - bottomright_heatmap (Tensor): Ground truth bottom-right corner heatmap. - topleft_offset (Tensor): Ground truth top-left corner offset. - bottomright_offset (Tensor): Ground truth bottom-right corner offset. - corner_embedding (list[list[list[int]]]): Ground truth corner embedding. Not must have. - topleft_guiding_shift (Tensor): Ground truth top-left corner guiding shift. Not must have. - bottomright_guiding_shift (Tensor): Ground truth bottom-right corner guiding shift. Not must have. - topleft_centripetal_shift (Tensor): Ground truth top-left corner centripetal shift. Not must have. - bottomright_centripetal_shift (Tensor): Ground truth bottom-right corner centripetal shift. Not must have. """ batch_size, _, height, width = feat_shape img_h, img_w = img_shape[:2] width_ratio = float(width / img_w) height_ratio = float(height / img_h) gt_tl_heatmap = gt_bboxes[-1].new_zeros( [batch_size, self.num_classes, height, width]) gt_br_heatmap = gt_bboxes[-1].new_zeros( [batch_size, self.num_classes, height, width]) gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width]) gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width]) if with_corner_emb: match = [] # Guiding shift is a kind of offset, from center to corner if with_guiding_shift: gt_tl_guiding_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) gt_br_guiding_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) # Centripetal shift is also a kind of offset, from center to corner # and normalized by log. 
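        # For example, a top-left corner at (scale_left, scale_top) =
        # (10.0, 4.0) whose box center sits at (scale_center_x,
        # scale_center_y) = (18.0, 9.0) stores the targets
        # log(18.0 - 10.0) ~= 2.08 and log(9.0 - 4.0) ~= 1.61; at test time
        # decode_heatmap applies .exp() to undo this normalization before
        # shifting the corners toward the center region.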
if with_centripetal_shift: gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) gt_br_centripetal_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) for batch_id in range(batch_size): # Ground truth of corner embedding per image is a list of coord set corner_match = [] for box_id in range(len(gt_labels[batch_id])): left, top, right, bottom = gt_bboxes[batch_id][box_id] center_x = (left + right) / 2.0 center_y = (top + bottom) / 2.0 label = gt_labels[batch_id][box_id] # Use coords in the feature level to generate ground truth scale_left = left * width_ratio scale_right = right * width_ratio scale_top = top * height_ratio scale_bottom = bottom * height_ratio scale_center_x = center_x * width_ratio scale_center_y = center_y * height_ratio # Int coords on feature map/ground truth tensor left_idx = int(min(scale_left, width - 1)) right_idx = int(min(scale_right, width - 1)) top_idx = int(min(scale_top, height - 1)) bottom_idx = int(min(scale_bottom, height - 1)) # Generate gaussian heatmap scale_box_width = ceil(scale_right - scale_left) scale_box_height = ceil(scale_bottom - scale_top) radius = gaussian_radius((scale_box_height, scale_box_width), min_overlap=0.3) radius = max(0, int(radius)) gt_tl_heatmap[batch_id, label] = gen_gaussian_target( gt_tl_heatmap[batch_id, label], [left_idx, top_idx], radius) gt_br_heatmap[batch_id, label] = gen_gaussian_target( gt_br_heatmap[batch_id, label], [right_idx, bottom_idx], radius) # Generate corner offset left_offset = scale_left - left_idx top_offset = scale_top - top_idx right_offset = scale_right - right_idx bottom_offset = scale_bottom - bottom_idx gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset gt_br_offset[batch_id, 1, bottom_idx, right_idx] = bottom_offset # Generate corner embedding if with_corner_emb: corner_match.append([[top_idx, left_idx], [bottom_idx, right_idx]]) # Generate guiding shift if with_guiding_shift: gt_tl_guiding_shift[batch_id, 0, top_idx, left_idx] = scale_center_x - left_idx gt_tl_guiding_shift[batch_id, 1, top_idx, left_idx] = scale_center_y - top_idx gt_br_guiding_shift[batch_id, 0, bottom_idx, right_idx] = right_idx - scale_center_x gt_br_guiding_shift[ batch_id, 1, bottom_idx, right_idx] = bottom_idx - scale_center_y # Generate centripetal shift if with_centripetal_shift: gt_tl_centripetal_shift[batch_id, 0, top_idx, left_idx] = log(scale_center_x - scale_left) gt_tl_centripetal_shift[batch_id, 1, top_idx, left_idx] = log(scale_center_y - scale_top) gt_br_centripetal_shift[batch_id, 0, bottom_idx, right_idx] = log(scale_right - scale_center_x) gt_br_centripetal_shift[batch_id, 1, bottom_idx, right_idx] = log(scale_bottom - scale_center_y) if with_corner_emb: match.append(corner_match) target_result = dict( topleft_heatmap=gt_tl_heatmap, topleft_offset=gt_tl_offset, bottomright_heatmap=gt_br_heatmap, bottomright_offset=gt_br_offset) if with_corner_emb: target_result.update(corner_embedding=match) if with_guiding_shift: target_result.update( topleft_guiding_shift=gt_tl_guiding_shift, bottomright_guiding_shift=gt_br_guiding_shift) if with_centripetal_shift: target_result.update( topleft_centripetal_shift=gt_tl_centripetal_shift, bottomright_centripetal_shift=gt_br_centripetal_shift) return target_result @force_fp32() def loss(self, tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): 
"""Compute losses of the head. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_embs (list[Tensor]): Top-left corner embeddings for each level with shape (N, corner_emb_channels, H, W). br_embs (list[Tensor]): Bottom-right corner embeddings for each level with shape (N, corner_emb_channels, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [left, top, right, bottom] format. gt_labels (list[Tensor]): Class indices corresponding to each box. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. Containing the following losses: - det_loss (list[Tensor]): Corner keypoint losses of all feature levels. - pull_loss (list[Tensor]): Part one of AssociativeEmbedding losses of all feature levels. - push_loss (list[Tensor]): Part two of AssociativeEmbedding losses of all feature levels. - off_loss (list[Tensor]): Corner offset losses of all feature levels. """ targets = self.get_targets( gt_bboxes, gt_labels, tl_heats[-1].shape, img_metas[0]['pad_shape'], with_corner_emb=self.with_corner_emb) mlvl_targets = [targets for _ in range(self.num_feat_levels)] det_losses, pull_losses, push_losses, off_losses = multi_apply( self.loss_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs, mlvl_targets) loss_dict = dict(det_loss=det_losses, off_loss=off_losses) if self.with_corner_emb: loss_dict.update(pull_loss=pull_losses, push_loss=push_losses) return loss_dict def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off, targets): """Compute losses for single level. Args: tl_hmp (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_hmp (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_emb (Tensor): Top-left corner embedding for current level with shape (N, corner_emb_channels, H, W). br_emb (Tensor): Bottom-right corner embedding for current level with shape (N, corner_emb_channels, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). br_off (Tensor): Bottom-right corner offset for current level with shape (N, corner_offset_channels, H, W). targets (dict): Corner target generated by `get_targets`. Returns: tuple[torch.Tensor]: Losses of the head's different branches containing the following losses: - det_loss (Tensor): Corner keypoint loss. - pull_loss (Tensor): Part one of AssociativeEmbedding loss. - push_loss (Tensor): Part two of AssociativeEmbedding loss. - off_loss (Tensor): Corner offset loss. 
""" gt_tl_hmp = targets['topleft_heatmap'] gt_br_hmp = targets['bottomright_heatmap'] gt_tl_off = targets['topleft_offset'] gt_br_off = targets['bottomright_offset'] gt_embedding = targets['corner_embedding'] # Detection loss tl_det_loss = self.loss_heatmap( tl_hmp.sigmoid(), gt_tl_hmp, avg_factor=max(1, gt_tl_hmp.eq(1).sum())) br_det_loss = self.loss_heatmap( br_hmp.sigmoid(), gt_br_hmp, avg_factor=max(1, gt_br_hmp.eq(1).sum())) det_loss = (tl_det_loss + br_det_loss) / 2.0 # AssociativeEmbedding loss if self.with_corner_emb and self.loss_embedding is not None: pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb, gt_embedding) else: pull_loss, push_loss = None, None # Offset loss # We only compute the offset loss at the real corner position. # The value of real corner would be 1 in heatmap ground truth. # The mask is computed in class agnostic mode and its shape is # batch * 1 * width * height. tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_tl_hmp) br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_br_hmp) tl_off_loss = self.loss_offset( tl_off, gt_tl_off, tl_off_mask, avg_factor=max(1, tl_off_mask.sum())) br_off_loss = self.loss_offset( br_off, gt_br_off, br_off_mask, avg_factor=max(1, br_off_mask.sum())) off_loss = (tl_off_loss + br_off_loss) / 2.0 return det_loss, pull_loss, push_loss, off_loss @force_fp32() def get_bboxes(self, tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs, img_metas, rescale=False, with_nms=True): """Transform network output for a batch into bbox predictions. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_embs (list[Tensor]): Top-left corner embeddings for each level with shape (N, corner_emb_channels, H, W). br_embs (list[Tensor]): Bottom-right corner embeddings for each level with shape (N, corner_emb_channels, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. """ assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas) result_list = [] for img_id in range(len(img_metas)): result_list.append( self._get_bboxes_single( tl_heats[-1][img_id:img_id + 1, :], br_heats[-1][img_id:img_id + 1, :], tl_offs[-1][img_id:img_id + 1, :], br_offs[-1][img_id:img_id + 1, :], img_metas[img_id], tl_emb=tl_embs[-1][img_id:img_id + 1, :], br_emb=br_embs[-1][img_id:img_id + 1, :], rescale=rescale, with_nms=with_nms)) return result_list def _get_bboxes_single(self, tl_heat, br_heat, tl_off, br_off, img_meta, tl_emb=None, br_emb=None, tl_centripetal_shift=None, br_centripetal_shift=None, rescale=False, with_nms=True): """Transform outputs for a single batch item into bbox predictions. Args: tl_heat (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_heat (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). 
            br_off (Tensor): Bottom-right corner offset for current level with
                shape (N, corner_offset_channels, H, W).
            img_meta (dict): Meta information of current image, e.g.,
                image size, scaling factor, etc.
            tl_emb (Tensor): Top-left corner embedding for current level with
                shape (N, corner_emb_channels, H, W).
            br_emb (Tensor): Bottom-right corner embedding for current level
                with shape (N, corner_emb_channels, H, W).
            tl_centripetal_shift (Tensor | None): Top-left corner's
                centripetal shift for current level with shape (N, 2, H, W).
            br_centripetal_shift (Tensor | None): Bottom-right corner's
                centripetal shift for current level with shape (N, 2, H, W).
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before returning boxes.
                Default: True.
        """
        if isinstance(img_meta, (list, tuple)):
            img_meta = img_meta[0]

        batch_bboxes, batch_scores, batch_clses = self.decode_heatmap(
            tl_heat=tl_heat.sigmoid(),
            br_heat=br_heat.sigmoid(),
            tl_off=tl_off,
            br_off=br_off,
            tl_emb=tl_emb,
            br_emb=br_emb,
            tl_centripetal_shift=tl_centripetal_shift,
            br_centripetal_shift=br_centripetal_shift,
            img_meta=img_meta,
            k=self.test_cfg.corner_topk,
            kernel=self.test_cfg.local_maximum_kernel,
            distance_threshold=self.test_cfg.distance_threshold)

        if rescale:
            batch_bboxes /= batch_bboxes.new_tensor(img_meta['scale_factor'])

        bboxes = batch_bboxes.view([-1, 4])
        scores = batch_scores.view(-1)
        clses = batch_clses.view(-1)

        detections = torch.cat([bboxes, scores.unsqueeze(-1)], -1)
        keepinds = (detections[:, -1] > -0.1)
        detections = detections[keepinds]
        labels = clses[keepinds]

        if with_nms:
            detections, labels = self._bboxes_nms(detections, labels,
                                                  self.test_cfg)

        return detections, labels

    def _bboxes_nms(self, bboxes, labels, cfg):
        if 'nms_cfg' in cfg:
            # `warning` is logging.warning, imported at the top of this file
            warning('nms_cfg in test_cfg will be deprecated. '
                    'Please rename it as nms')
            if 'nms' not in cfg:
                cfg.nms = cfg.nms_cfg

        if labels.numel() > 0:
            max_num = cfg.max_per_img
            bboxes, keep = batched_nms(bboxes[:, :4],
                                       bboxes[:, -1].contiguous(), labels,
                                       cfg.nms)
            if max_num > 0:
                bboxes = bboxes[:max_num]
                labels = labels[keep][:max_num]

        return bboxes, labels

    def decode_heatmap(self,
                       tl_heat,
                       br_heat,
                       tl_off,
                       br_off,
                       tl_emb=None,
                       br_emb=None,
                       tl_centripetal_shift=None,
                       br_centripetal_shift=None,
                       img_meta=None,
                       k=100,
                       kernel=3,
                       distance_threshold=0.5,
                       num_dets=1000):
        """Transform outputs for a single batch item into raw bbox
        predictions.

        Args:
            tl_heat (Tensor): Top-left corner heatmap for current level with
                shape (N, num_classes, H, W).
            br_heat (Tensor): Bottom-right corner heatmap for current level
                with shape (N, num_classes, H, W).
            tl_off (Tensor): Top-left corner offset for current level with
                shape (N, corner_offset_channels, H, W).
            br_off (Tensor): Bottom-right corner offset for current level with
                shape (N, corner_offset_channels, H, W).
            tl_emb (Tensor | None): Top-left corner embedding for current
                level with shape (N, corner_emb_channels, H, W).
            br_emb (Tensor | None): Bottom-right corner embedding for current
                level with shape (N, corner_emb_channels, H, W).
            tl_centripetal_shift (Tensor | None): Top-left centripetal shift
                for current level with shape (N, 2, H, W).
            br_centripetal_shift (Tensor | None): Bottom-right centripetal
                shift for current level with shape (N, 2, H, W).
            img_meta (dict): Meta information of current image, e.g.,
                image size, scaling factor, etc.
            k (int): Get top k corner keypoints from heatmap.
            kernel (int): Max pooling kernel for extracting local maximum
                pixels.
            distance_threshold (float): Distance threshold.
Top-left and bottom-right corner keypoints with feature distance less than the threshold will be regarded as keypoints from the same object. num_dets (int): Number of raw boxes before doing NMS. Returns: tuple[torch.Tensor]: Decoded output of CornerHead, containing the following Tensors: - bboxes (Tensor): Coords of each box. - scores (Tensor): Scores of each box. - clses (Tensor): Categories of each box. """ with_embedding = tl_emb is not None and br_emb is not None with_centripetal_shift = ( tl_centripetal_shift is not None and br_centripetal_shift is not None) assert with_embedding + with_centripetal_shift == 1 batch, _, height, width = tl_heat.size() if torch.onnx.is_in_onnx_export(): inp_h, inp_w = img_meta['pad_shape_for_onnx'][:2] else: inp_h, inp_w, _ = img_meta['pad_shape'] # perform nms on heatmaps tl_heat = get_local_maximum(tl_heat, kernel=kernel) br_heat = get_local_maximum(br_heat, kernel=kernel) tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = get_topk_from_heatmap( tl_heat, k=k) br_scores, br_inds, br_clses, br_ys, br_xs = get_topk_from_heatmap( br_heat, k=k) # We use repeat instead of expand here because expand is a # shallow-copy function, which could sometimes cause unexpected testing # results. Using expand decreases mAP by about 10% during testing # compared to repeat. tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k) tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k) br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1) br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1) tl_off = transpose_and_gather_feat(tl_off, tl_inds) tl_off = tl_off.view(batch, k, 1, 2) br_off = transpose_and_gather_feat(br_off, br_inds) br_off = br_off.view(batch, 1, k, 2) tl_xs = tl_xs + tl_off[..., 0] tl_ys = tl_ys + tl_off[..., 1] br_xs = br_xs + br_off[..., 0] br_ys = br_ys + br_off[..., 1] if with_centripetal_shift: tl_centripetal_shift = transpose_and_gather_feat( tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp() br_centripetal_shift = transpose_and_gather_feat( br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp() tl_ctxs = tl_xs + tl_centripetal_shift[..., 0] tl_ctys = tl_ys + tl_centripetal_shift[..., 1] br_ctxs = br_xs - br_centripetal_shift[..., 0] br_ctys = br_ys - br_centripetal_shift[..., 1] # all possible boxes based on top k corners (ignoring class) tl_xs *= (inp_w / width) tl_ys *= (inp_h / height) br_xs *= (inp_w / width) br_ys *= (inp_h / height) if with_centripetal_shift: tl_ctxs *= (inp_w / width) tl_ctys *= (inp_h / height) br_ctxs *= (inp_w / width) br_ctys *= (inp_h / height) x_off, y_off = 0, 0 # no crop if not torch.onnx.is_in_onnx_export(): # Since `RandomCenterCropPad` is done on CPU with numpy and is not # dynamically traceable when exporting to ONNX, 'border' does not # appear as a key in 'img_meta'. As a temporary solution, we move the # 'border' handling to the post-processing that runs after the ONNX # export; it is handled in `mmdet/core/export/model_wrappers.py`. # Although the pytorch and exported onnx models differ slightly, the # difference can be ignored since comparable performance is achieved # between them (e.g.
40.4 vs 40.6 on COCO val2017, for CornerNet without test-time flip) if 'border' in img_meta: x_off = img_meta['border'][2] y_off = img_meta['border'][0] tl_xs -= x_off tl_ys -= y_off br_xs -= x_off br_ys -= y_off zeros = tl_xs.new_zeros(*tl_xs.size()) tl_xs = torch.where(tl_xs > 0.0, tl_xs, zeros) tl_ys = torch.where(tl_ys > 0.0, tl_ys, zeros) br_xs = torch.where(br_xs > 0.0, br_xs, zeros) br_ys = torch.where(br_ys > 0.0, br_ys, zeros) bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3) area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs() if with_centripetal_shift: tl_ctxs -= x_off tl_ctys -= y_off br_ctxs -= x_off br_ctys -= y_off tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs) tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys) br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs) br_ctys *= br_ctys.gt(0.0).type_as(br_ctys) ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys), dim=3) area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs() rcentral = torch.zeros_like(ct_bboxes) # magic numbers from section 4.1 of the paper mu = torch.ones_like(area_bboxes) / 2.4 mu[area_bboxes > 3500] = 1 / 2.1 # large bboxes have a smaller mu bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2 bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2 rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] - bboxes[..., 0]) / 2 rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] - bboxes[..., 1]) / 2 rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] - bboxes[..., 0]) / 2 rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] - bboxes[..., 1]) / 2 area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) * (rcentral[..., 3] - rcentral[..., 1])).abs() dists = area_ct_bboxes / area_rcentral tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | ( ct_bboxes[..., 0] >= rcentral[..., 2]) tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | ( ct_bboxes[..., 1] >= rcentral[..., 3]) br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | ( ct_bboxes[..., 2] >= rcentral[..., 2]) br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | ( ct_bboxes[..., 3] >= rcentral[..., 3]) if with_embedding: tl_emb = transpose_and_gather_feat(tl_emb, tl_inds) tl_emb = tl_emb.view(batch, k, 1) br_emb = transpose_and_gather_feat(br_emb, br_inds) br_emb = br_emb.view(batch, 1, k) dists = torch.abs(tl_emb - br_emb) tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k) br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1) scores = (tl_scores + br_scores) / 2 # scores for all possible boxes # tl and br should have the same class tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k) br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1) cls_inds = (tl_clses != br_clses) # reject boxes based on distances dist_inds = dists > distance_threshold # reject boxes based on widths and heights width_inds = (br_xs <= tl_xs) height_inds = (br_ys <= tl_ys) # We do not use `scores[cls_inds]` here; instead we use `torch.where`. # Since only 1-D indices of type 'tensor(bool)' are supported when # exporting to ONNX, using any bool index with more dimensions (e.g.
2-D bool tensor) as input parameter in node is invalid negative_scores = -1 * torch.ones_like(scores) scores = torch.where(cls_inds, negative_scores, scores) scores = torch.where(width_inds, negative_scores, scores) scores = torch.where(height_inds, negative_scores, scores) scores = torch.where(dist_inds, negative_scores, scores) if with_centripetal_shift: scores[tl_ctx_inds] = -1 scores[tl_cty_inds] = -1 scores[br_ctx_inds] = -1 scores[br_cty_inds] = -1 scores = scores.view(batch, -1) scores, inds = torch.topk(scores, num_dets) scores = scores.unsqueeze(2) bboxes = bboxes.view(batch, -1, 4) bboxes = gather_feat(bboxes, inds) clses = tl_clses.contiguous().view(batch, -1, 1) clses = gather_feat(clses, inds).float() return bboxes, scores, clses def onnx_export(self, tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs, img_metas, rescale=False, with_nms=True): """Transform network output for a batch into bbox predictions. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_embs (list[Tensor]): Top-left corner embeddings for each level with shape (N, corner_emb_channels, H, W). br_embs (list[Tensor]): Bottom-right corner embeddings for each level with shape (N, corner_emb_channels, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple[Tensor, Tensor]: First tensor bboxes with shape [N, num_det, 5], 5 arrange as (x1, y1, x2, y2, score) and second element is class labels of shape [N, num_det]. """ assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len( img_metas) == 1 result_list = [] for img_id in range(len(img_metas)): result_list.append( self._get_bboxes_single( tl_heats[-1][img_id:img_id + 1, :], br_heats[-1][img_id:img_id + 1, :], tl_offs[-1][img_id:img_id + 1, :], br_offs[-1][img_id:img_id + 1, :], img_metas[img_id], tl_emb=tl_embs[-1][img_id:img_id + 1, :], br_emb=br_embs[-1][img_id:img_id + 1, :], rescale=rescale, with_nms=with_nms)) detections, labels = result_list[0] # batch_size 1 here, [1, num_det, 5], [1, num_det] return detections.unsqueeze(0), labels.unsqueeze(0) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/ddod_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init from mmcv.runner import force_fp32 from mmdet.core import (anchor_inside_flags, build_assigner, build_sampler, images_to_levels, multi_apply, reduce_mean, unmap) from mmdet.core.bbox import bbox_overlaps from ..builder import HEADS, build_loss from .anchor_head import AnchorHead EPS = 1e-12 @HEADS.register_module() class DDODHead(AnchorHead): """DDOD head decomposes conjunctions lying in most current one-stage detectors via label assignment disentanglement, spatial feature disentanglement, and pyramid supervision disentanglement. 
https://arxiv.org/abs/2107.02963 Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. stacked_convs (int): The number of stacked Conv. Default: 4. conv_cfg (dict): Conv config of ddod head. Default: None. use_dcn (bool): Use dcn, Same as ATSS when False. Default: True. norm_cfg (dict): Normal config of ddod head. Default: dict(type='GN', num_groups=32, requires_grad=True). loss_iou (dict): Config of IoU loss. Default: dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0). """ def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, use_dcn=True, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), loss_iou=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), **kwargs): self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.use_dcn = use_dcn super(DDODHead, self).__init__(num_classes, in_channels, **kwargs) self.sampling = False if self.train_cfg: self.cls_assigner = build_assigner(self.train_cfg.assigner) self.reg_assigner = build_assigner(self.train_cfg.reg_assigner) sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.loss_iou = build_loss(loss_iou) def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=dict(type='DCN', deform_groups=1) if i == 0 and self.use_dcn else self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=dict(type='DCN', deform_groups=1) if i == 0 and self.use_dcn else self.conv_cfg, norm_cfg=self.norm_cfg)) self.atss_cls = nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.atss_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) self.atss_iou = nn.Conv2d( self.feat_channels, self.num_base_priors * 1, 3, padding=1) self.scales = nn.ModuleList( [Scale(1.0) for _ in self.prior_generator.strides]) # we use the global list in loss self.cls_num_pos_samples_per_level = [ 0. for _ in range(len(self.prior_generator.strides)) ] self.reg_num_pos_samples_per_level = [ 0. for _ in range(len(self.prior_generator.strides)) ] def init_weights(self): """Initialize weights of the head.""" for m in self.cls_convs: normal_init(m.conv, std=0.01) for m in self.reg_convs: normal_init(m.conv, std=0.01) normal_init(self.atss_reg, std=0.01) normal_init(self.atss_iou, std=0.01) bias_cls = bias_init_with_prob(0.01) normal_init(self.atss_cls, std=0.01, bias=bias_cls) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. iou_preds (list[Tensor]): IoU scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 1. 
""" return multi_apply(self.forward_single, feats, self.scales) def forward_single(self, x, scale): """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. Returns: tuple: - cls_score (Tensor): Cls scores for a single scale level \ the channels number is num_base_priors * num_classes. - bbox_pred (Tensor): Box energies / deltas for a single \ scale level, the channels number is num_base_priors * 4. - iou_pred (Tensor): Iou for a single scale level, the \ channel number is (N, num_base_priors * 1, H, W). """ cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.atss_cls(cls_feat) # we just follow atss, not apply exp in bbox_pred bbox_pred = scale(self.atss_reg(reg_feat)).float() iou_pred = self.atss_iou(reg_feat) return cls_score, bbox_pred, iou_pred def loss_cls_single(self, cls_score, labels, label_weights, reweight_factor, num_total_samples): """Compute cls loss of a single scale level. Args: cls_score (Tensor): Box scores for each scale level Has shape (N, num_base_priors * num_classes, H, W). labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) reweight_factor (list[int]): Reweight factor for cls and reg loss. num_total_samples (int): Number of positive samples that is reduced over all GPUs. Returns: tuple[Tensor]: A tuple of loss components. """ cls_score = cls_score.permute(0, 2, 3, 1).reshape( -1, self.cls_out_channels).contiguous() labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=num_total_samples) return reweight_factor * loss_cls, def loss_reg_single(self, anchors, bbox_pred, iou_pred, labels, label_weights, bbox_targets, bbox_weights, reweight_factor, num_total_samples): """Compute reg loss of a single scale level. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). bbox_pred (Tensor): Box energies / deltas for each scale level with shape (N, num_base_priors * 4, H, W). iou_pred (Tensor): Iou for a single scale level, the channel number is (N, num_base_priors * 1, H, W). labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). bbox_weights (Tensor): BBox weights of all anchors in the image with shape (N, 4) reweight_factor (list[int]): Reweight factor for cls and reg loss. num_total_samples (int): Number of positive samples that is reduced over all GPUs. Returns: dict[str, Tensor]: A dictionary of loss components. """ anchors = anchors.reshape(-1, 4) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) iou_pred = iou_pred.permute(0, 2, 3, 1).reshape(-1, ) bbox_targets = bbox_targets.reshape(-1, 4) bbox_weights = bbox_weights.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) iou_targets = label_weights.new_zeros(labels.shape) iou_weights = label_weights.new_zeros(labels.shape) iou_weights[(bbox_weights.sum(axis=1) > 0).nonzero( as_tuple=False)] = 1. 
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero(as_tuple=False).squeeze(1) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_decode_bbox_pred = self.bbox_coder.decode( pos_anchors, pos_bbox_pred) pos_decode_bbox_targets = self.bbox_coder.decode( pos_anchors, pos_bbox_targets) # regression loss loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_decode_bbox_targets, avg_factor=num_total_samples) iou_targets[pos_inds] = bbox_overlaps( pos_decode_bbox_pred.detach(), pos_decode_bbox_targets, is_aligned=True) loss_iou = self.loss_iou( iou_pred, iou_targets, iou_weights, avg_factor=num_total_samples) else: loss_bbox = bbox_pred.sum() * 0 loss_iou = iou_pred.sum() * 0 return reweight_factor * loss_bbox, reweight_factor * loss_iou def calc_reweight_factor(self, labels_list): """Compute reweight_factor for regression and classification loss.""" # get pos samples for each level bg_class_ind = self.num_classes for ii, each_level_label in enumerate(labels_list): pos_inds = ((each_level_label >= 0) & (each_level_label < bg_class_ind)).nonzero( as_tuple=False).squeeze(1) self.cls_num_pos_samples_per_level[ii] += len(pos_inds) # get reweight factor from 1 ~ 2 with bilinear interpolation min_pos_samples = min(self.cls_num_pos_samples_per_level) max_pos_samples = max(self.cls_num_pos_samples_per_level) interval = 1. / (max_pos_samples - min_pos_samples + 1e-10) reweight_factor_per_level = [] for pos_samples in self.cls_num_pos_samples_per_level: factor = 2. - (pos_samples - min_pos_samples) * interval reweight_factor_per_level.append(factor) return reweight_factor_per_level @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds')) def loss(self, cls_scores, bbox_preds, iou_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_base_priors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_base_priors * 4, H, W) iou_preds (list[Tensor]): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, 1, H, W). gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 # calculate common vars for cls and reg assigners at once targets_com = self.process_predictions_and_anchors( anchor_list, valid_flag_list, cls_scores, bbox_preds, img_metas, gt_bboxes_ignore) (anchor_list, valid_flag_list, num_level_anchors_list, cls_score_list, bbox_pred_list, gt_bboxes_ignore_list) = targets_com # classification branch assigner cls_targets = self.get_cls_targets( anchor_list, valid_flag_list, num_level_anchors_list, cls_score_list, bbox_pred_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore_list, gt_labels_list=gt_labels, label_channels=label_channels) if cls_targets is None: return None (cls_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_targets num_total_samples = reduce_mean( torch.tensor(num_total_pos, dtype=torch.float, device=device)).item() num_total_samples = max(num_total_samples, 1.0) reweight_factor_per_level = self.calc_reweight_factor(labels_list) cls_losses_cls, = multi_apply( self.loss_cls_single, cls_scores, labels_list, label_weights_list, reweight_factor_per_level, num_total_samples=num_total_samples) # regression branch assigner reg_targets = self.get_reg_targets( anchor_list, valid_flag_list, num_level_anchors_list, cls_score_list, bbox_pred_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore_list, gt_labels_list=gt_labels, label_channels=label_channels) if reg_targets is None: return None (reg_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = reg_targets num_total_samples = reduce_mean( torch.tensor(num_total_pos, dtype=torch.float, device=device)).item() num_total_samples = max(num_total_samples, 1.0) reweight_factor_per_level = self.calc_reweight_factor(labels_list) reg_losses_bbox, reg_losses_iou = multi_apply( self.loss_reg_single, reg_anchor_list, bbox_preds, iou_preds, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, reweight_factor_per_level, num_total_samples=num_total_samples) return dict( loss_cls=cls_losses_cls, loss_bbox=reg_losses_bbox, loss_iou=reg_losses_iou) def process_predictions_and_anchors(self, anchor_list, valid_flag_list, cls_scores, bbox_preds, img_metas, gt_bboxes_ignore_list): """Compute common vars for regression and classification targets. Args: anchor_list (list[Tensor]): anchors of each image. valid_flag_list (list[Tensor]): Valid flags of each image. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore_list (list[Tensor] | None): specify which bounding boxes can be ignored when computing the loss. Return: tuple[Tensor]: A tuple of common loss vars. 
""" num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] num_level_anchors_list = [num_level_anchors] * num_imgs anchor_list_ = [] valid_flag_list_ = [] # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list_.append(torch.cat(anchor_list[i])) valid_flag_list_.append(torch.cat(valid_flag_list[i])) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] num_levels = len(cls_scores) cls_score_list = [] bbox_pred_list = [] mlvl_cls_score_list = [ cls_score.permute(0, 2, 3, 1).reshape( num_imgs, -1, self.num_base_priors * self.cls_out_channels) for cls_score in cls_scores ] mlvl_bbox_pred_list = [ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.num_base_priors * 4) for bbox_pred in bbox_preds ] for i in range(num_imgs): mlvl_cls_tensor_list = [ mlvl_cls_score_list[j][i] for j in range(num_levels) ] mlvl_bbox_tensor_list = [ mlvl_bbox_pred_list[j][i] for j in range(num_levels) ] cat_mlvl_cls_score = torch.cat(mlvl_cls_tensor_list, dim=0) cat_mlvl_bbox_pred = torch.cat(mlvl_bbox_tensor_list, dim=0) cls_score_list.append(cat_mlvl_cls_score) bbox_pred_list.append(cat_mlvl_bbox_pred) return (anchor_list_, valid_flag_list_, num_level_anchors_list, cls_score_list, bbox_pred_list, gt_bboxes_ignore_list) def get_cls_targets(self, anchor_list, valid_flag_list, num_level_anchors_list, cls_score_list, bbox_pred_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True): """Get cls targets for DDOD head. This method is almost the same as `AnchorHead.get_targets()`. Besides returning the targets as the parent method does, it also returns the anchors as the first element of the returned tuple. Args: anchor_list (list[Tensor]): anchors of each image. valid_flag_list (list[Tensor]): Valid flags of each image. num_level_anchors_list (list[Tensor]): Number of anchors of each scale level of all image. cls_score_list (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. bbox_pred_list (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore_list (list[Tensor] | None): specify which bounding boxes can be ignored when computing the loss. gt_labels_list (list[Tensor]): class indices corresponding to each box. label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Return: tuple[Tensor]: A tuple of cls targets components. 
""" (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( self._get_target_single, anchor_list, valid_flag_list, cls_score_list, bbox_pred_list, num_level_anchors_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs, is_cls_assigner=True) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors_list[0]) labels_list = images_to_levels(all_labels, num_level_anchors_list[0]) label_weights_list = images_to_levels(all_label_weights, num_level_anchors_list[0]) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors_list[0]) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors_list[0]) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) def get_reg_targets(self, anchor_list, valid_flag_list, num_level_anchors_list, cls_score_list, bbox_pred_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True): """Get reg targets for DDOD head. This method is almost the same as `AnchorHead.get_targets()` when is_cls_assigner is False. Besides returning the targets as the parent method does, it also returns the anchors as the first element of the returned tuple. Args: anchor_list (list[Tensor]): anchors of each image. valid_flag_list (list[Tensor]): Valid flags of each image. num_level_anchors (int): Number of anchors of each scale level. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. gt_labels_list (list[Tensor]): class indices corresponding to each box. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore_list (list[Tensor] | None): specify which bounding boxes can be ignored when computing the loss. Return: tuple[Tensor]: A tuple of reg targets components. """ (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( self._get_target_single, anchor_list, valid_flag_list, cls_score_list, bbox_pred_list, num_level_anchors_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs, is_cls_assigner=False) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. 
multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors_list[0]) labels_list = images_to_levels(all_labels, num_level_anchors_list[0]) label_weights_list = images_to_levels(all_label_weights, num_level_anchors_list[0]) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors_list[0]) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors_list[0]) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) def _get_target_single(self, flat_anchors, valid_flags, cls_scores, bbox_preds, num_level_anchors, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True, is_cls_assigner=True): """Compute regression, classification targets for anchors in a single image. Args: flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_base_priors, 4). valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_base_priors,). cls_scores (Tensor): Classification scores for all scale levels of the image. bbox_preds (Tensor): Box energies / deltas for all scale levels of the image. num_level_anchors (list[int]): Number of anchors of each scale level. gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, ). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts, ). img_meta (dict): Meta info of the image. label_channels (int): Channel of label. Default: 1. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Default: True. is_cls_assigner (bool): Classification or regression. Default: True. Returns: tuple: N is the number of total anchors in the image. - labels (Tensor): Labels of all anchors in the image with \ shape (N, ). - label_weights (Tensor): Label weights of all anchor in the \ image with shape (N, ). - bbox_targets (Tensor): BBox targets of all anchors in the \ image with shape (N, 4). - bbox_weights (Tensor): BBox weights of all anchors in the \ image with shape (N, 4) - pos_inds (Tensor): Indices of positive anchor with shape \ (num_pos, ). - neg_inds (Tensor): Indices of negative anchor with shape \ (num_neg, ). 
""" inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) if not inside_flags.any(): return (None, ) * 7 # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] num_level_anchors_inside = self.get_num_level_anchors_inside( num_level_anchors, inside_flags) bbox_preds_valid = bbox_preds[inside_flags, :] cls_scores_valid = cls_scores[inside_flags, :] assigner = self.cls_assigner if is_cls_assigner else self.reg_assigner # decode prediction out of assigner bbox_preds_valid = self.bbox_coder.decode(anchors, bbox_preds_valid) assign_result = assigner.assign(anchors, num_level_anchors_inside, gt_bboxes, gt_bboxes_ignore, gt_labels, cls_scores_valid, bbox_preds_valid) sampling_result = self.sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: if hasattr(self, 'bbox_coder'): pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) else: # used in VFNetHead pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class since v2.5.0 labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) anchors = unmap(anchors, num_total_anchors, inside_flags) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (anchors, labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) def get_num_level_anchors_inside(self, num_level_anchors, inside_flags): """Get the anchors of each scale level inside. Args: num_level_anchors (list[int]): Number of anchors of each scale level. inside_flags (Tensor): Multi level inside flags of the image, which are concatenated into a single tensor of shape (num_base_priors,). Returns: list[int]: Number of anchors of each scale level inside. """ split_inside_flags = torch.split(inside_flags, num_level_anchors) num_level_anchors_inside = [ int(flags.sum()) for flags in split_inside_flags ] return num_level_anchors_inside ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/deformable_detr_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
import copy import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Linear, bias_init_with_prob, constant_init from mmcv.runner import force_fp32 from mmdet.core import multi_apply from mmdet.models.utils.transformer import inverse_sigmoid from ..builder import HEADS from .detr_head import DETRHead @HEADS.register_module() class DeformableDETRHead(DETRHead): """Head of DeformDETR: Deformable DETR: Deformable Transformers for End-to- End Object Detection. Code is modified from the `official github repo `_. More details can be found in the `paper `_ . Args: with_box_refine (bool): Whether to refine the reference points in the decoder. Defaults to False. as_two_stage (bool) : Whether to generate the proposal from the outputs of encoder. transformer (obj:`ConfigDict`): ConfigDict is used for building the Encoder and Decoder. """ def __init__(self, *args, with_box_refine=False, as_two_stage=False, transformer=None, **kwargs): self.with_box_refine = with_box_refine self.as_two_stage = as_two_stage if self.as_two_stage: transformer['as_two_stage'] = self.as_two_stage super(DeformableDETRHead, self).__init__( *args, transformer=transformer, **kwargs) def _init_layers(self): """Initialize classification branch and regression branch of head.""" fc_cls = Linear(self.embed_dims, self.cls_out_channels) reg_branch = [] for _ in range(self.num_reg_fcs): reg_branch.append(Linear(self.embed_dims, self.embed_dims)) reg_branch.append(nn.ReLU()) reg_branch.append(Linear(self.embed_dims, 4)) reg_branch = nn.Sequential(*reg_branch) def _get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) # last reg_branch is used to generate proposal from # encode feature map when as_two_stage is True. num_pred = (self.transformer.decoder.num_layers + 1) if \ self.as_two_stage else self.transformer.decoder.num_layers if self.with_box_refine: self.cls_branches = _get_clones(fc_cls, num_pred) self.reg_branches = _get_clones(reg_branch, num_pred) else: self.cls_branches = nn.ModuleList( [fc_cls for _ in range(num_pred)]) self.reg_branches = nn.ModuleList( [reg_branch for _ in range(num_pred)]) if not self.as_two_stage: self.query_embedding = nn.Embedding(self.num_query, self.embed_dims * 2) def init_weights(self): """Initialize weights of the DeformDETR head.""" self.transformer.init_weights() if self.loss_cls.use_sigmoid: bias_init = bias_init_with_prob(0.01) for m in self.cls_branches: nn.init.constant_(m.bias, bias_init) for m in self.reg_branches: constant_init(m[-1], 0, bias=0) nn.init.constant_(self.reg_branches[0][-1].bias.data[2:], -2.0) if self.as_two_stage: for m in self.reg_branches: nn.init.constant_(m[-1].bias.data[2:], 0.0) def forward(self, mlvl_feats, img_metas): """Forward function. Args: mlvl_feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor with shape (N, C, H, W). img_metas (list[dict]): List of image information. Returns: all_cls_scores (Tensor): Outputs from the classification head, \ shape [nb_dec, bs, num_query, cls_out_channels]. Note \ cls_out_channels should includes background. all_bbox_preds (Tensor): Sigmoid outputs from the regression \ head with normalized coordinate format (cx, cy, w, h). \ Shape [nb_dec, bs, num_query, 4]. enc_outputs_class (Tensor): The score of each point on encode \ feature map, has shape (N, h*w, num_class). Only when \ as_two_stage is True it would be returned, otherwise \ `None` would be returned. 
enc_outputs_coord (Tensor): The proposal generate from the \ encode feature map, has shape (N, h*w, 4). Only when \ as_two_stage is True it would be returned, otherwise \ `None` would be returned. """ batch_size = mlvl_feats[0].size(0) input_img_h, input_img_w = img_metas[0]['batch_input_shape'] img_masks = mlvl_feats[0].new_ones( (batch_size, input_img_h, input_img_w)) for img_id in range(batch_size): img_h, img_w, _ = img_metas[img_id]['img_shape'] img_masks[img_id, :img_h, :img_w] = 0 mlvl_masks = [] mlvl_positional_encodings = [] for feat in mlvl_feats: mlvl_masks.append( F.interpolate(img_masks[None], size=feat.shape[-2:]).to(torch.bool).squeeze(0)) mlvl_positional_encodings.append( self.positional_encoding(mlvl_masks[-1])) query_embeds = None if not self.as_two_stage: query_embeds = self.query_embedding.weight hs, init_reference, inter_references, \ enc_outputs_class, enc_outputs_coord = self.transformer( mlvl_feats, mlvl_masks, query_embeds, mlvl_positional_encodings, reg_branches=self.reg_branches if self.with_box_refine else None, # noqa:E501 cls_branches=self.cls_branches if self.as_two_stage else None # noqa:E501 ) hs = hs.permute(0, 2, 1, 3) outputs_classes = [] outputs_coords = [] for lvl in range(hs.shape[0]): if lvl == 0: reference = init_reference else: reference = inter_references[lvl - 1] reference = inverse_sigmoid(reference) outputs_class = self.cls_branches[lvl](hs[lvl]) tmp = self.reg_branches[lvl](hs[lvl]) if reference.shape[-1] == 4: tmp += reference else: assert reference.shape[-1] == 2 tmp[..., :2] += reference outputs_coord = tmp.sigmoid() outputs_classes.append(outputs_class) outputs_coords.append(outputs_coord) outputs_classes = torch.stack(outputs_classes) outputs_coords = torch.stack(outputs_coords) if self.as_two_stage: return outputs_classes, outputs_coords, \ enc_outputs_class, \ enc_outputs_coord.sigmoid() else: return outputs_classes, outputs_coords, \ None, None @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) def loss(self, all_cls_scores, all_bbox_preds, enc_cls_scores, enc_bbox_preds, gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore=None): """"Loss function. Args: all_cls_scores (Tensor): Classification score of all decoder layers, has shape [nb_dec, bs, num_query, cls_out_channels]. all_bbox_preds (Tensor): Sigmoid regression outputs of all decode layers. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and shape [nb_dec, bs, num_query, 4]. enc_cls_scores (Tensor): Classification scores of points on encode feature map , has shape (N, h*w, num_classes). Only be passed when as_two_stage is True, otherwise is None. enc_bbox_preds (Tensor): Regression results of each points on the encode feature map, has shape (N, h*w, 4). Only be passed when as_two_stage is True, otherwise is None. gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels_list (list[Tensor]): Ground truth class indices for each image with shape (num_gts, ). img_metas (list[dict]): List of image meta information. gt_bboxes_ignore (list[Tensor], optional): Bounding boxes which can be ignored for each image. Default None. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert gt_bboxes_ignore is None, \ f'{self.__class__.__name__} only supports ' \ f'for gt_bboxes_ignore setting to None.' 
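# Each decoder layer is supervised independently (auxiliary losses), so
# the ground-truth lists are replicated once per decoder layer below
# before being dispatched with `multi_apply`.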
num_dec_layers = len(all_cls_scores) all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] all_gt_bboxes_ignore_list = [ gt_bboxes_ignore for _ in range(num_dec_layers) ] img_metas_list = [img_metas for _ in range(num_dec_layers)] losses_cls, losses_bbox, losses_iou = multi_apply( self.loss_single, all_cls_scores, all_bbox_preds, all_gt_bboxes_list, all_gt_labels_list, img_metas_list, all_gt_bboxes_ignore_list) loss_dict = dict() # loss of proposal generated from encode feature map. if enc_cls_scores is not None: binary_labels_list = [ torch.zeros_like(gt_labels_list[i]) for i in range(len(img_metas)) ] enc_loss_cls, enc_losses_bbox, enc_losses_iou = \ self.loss_single(enc_cls_scores, enc_bbox_preds, gt_bboxes_list, binary_labels_list, img_metas, gt_bboxes_ignore) loss_dict['enc_loss_cls'] = enc_loss_cls loss_dict['enc_loss_bbox'] = enc_losses_bbox loss_dict['enc_loss_iou'] = enc_losses_iou # loss from the last decoder layer loss_dict['loss_cls'] = losses_cls[-1] loss_dict['loss_bbox'] = losses_bbox[-1] loss_dict['loss_iou'] = losses_iou[-1] # loss from other decoder layers num_dec_layer = 0 for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1], losses_bbox[:-1], losses_iou[:-1]): loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i num_dec_layer += 1 return loss_dict @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) def get_bboxes(self, all_cls_scores, all_bbox_preds, enc_cls_scores, enc_bbox_preds, img_metas, rescale=False): """Transform network outputs for a batch into bbox predictions. Args: all_cls_scores (Tensor): Classification score of all decoder layers, has shape [nb_dec, bs, num_query, cls_out_channels]. all_bbox_preds (Tensor): Sigmoid regression outputs of all decode layers. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and shape [nb_dec, bs, num_query, 4]. enc_cls_scores (Tensor): Classification scores of points on encode feature map , has shape (N, h*w, num_classes). Only be passed when as_two_stage is True, otherwise is None. enc_bbox_preds (Tensor): Regression results of each points on the encode feature map, has shape (N, h*w, 4). Only be passed when as_two_stage is True, otherwise is None. img_metas (list[dict]): Meta information of each image. rescale (bool, optional): If True, return boxes in original image space. Default False. Returns: list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \ The first item is an (n, 5) tensor, where the first 4 columns \ are bounding box positions (tl_x, tl_y, br_x, br_y) and the \ 5-th column is a score between 0 and 1. The second item is a \ (n,) tensor where each item is the predicted class label of \ the corresponding box. """ cls_scores = all_cls_scores[-1] bbox_preds = all_bbox_preds[-1] result_list = [] for img_id in range(len(img_metas)): cls_score = cls_scores[img_id] bbox_pred = bbox_preds[img_id] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] proposals = self._get_bboxes_single(cls_score, bbox_pred, img_shape, scale_factor, rescale) result_list.append(proposals) return result_list ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/dense_test_mixins.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
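# A minimal usage sketch of the mixin below (illustrative only, assuming
# `head` is a dense head inheriting `BBoxTestMixin` and `feats`/`img_metas`
# come from a detector's backbone + neck):
#
#     results = head.simple_test_bboxes(feats, img_metas, rescale=True)
#     det_bboxes, det_labels = results[0]  # (n, 5) boxes+scores, (n,) labels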
import sys from inspect import signature import torch from mmcv.ops import batched_nms from mmdet.core import bbox_mapping_back, merge_aug_proposals if sys.version_info >= (3, 7): from mmdet.utils.contextmanagers import completed class BBoxTestMixin(object): """Mixin class for testing det bboxes via DenseHead.""" def simple_test_bboxes(self, feats, img_metas, rescale=False): """Test det bboxes without test-time augmentation, can be applied in DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: feats (tuple[torch.Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is ``bboxes`` with shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). The shape of the second tensor in the tuple is ``labels`` with shape (n,) """ outs = self.forward(feats) results_list = self.get_bboxes( *outs, img_metas=img_metas, rescale=rescale) return results_list def aug_test_bboxes(self, feats, img_metas, rescale=False): """Test det bboxes with test time augmentation, can be applied in DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is ``bboxes`` with shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). The shape of the second tensor in the tuple is ``labels`` with shape (n,). The length of list should always be 1. 
""" # check with_nms argument gb_sig = signature(self.get_bboxes) gb_args = [p.name for p in gb_sig.parameters.values()] gbs_sig = signature(self._get_bboxes_single) gbs_args = [p.name for p in gbs_sig.parameters.values()] assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \ f'{self.__class__.__name__}' \ ' does not support test-time augmentation' aug_bboxes = [] aug_scores = [] aug_labels = [] for x, img_meta in zip(feats, img_metas): # only one image in the batch outs = self.forward(x) bbox_outputs = self.get_bboxes( *outs, img_metas=img_meta, cfg=self.test_cfg, rescale=False, with_nms=False)[0] aug_bboxes.append(bbox_outputs[0]) aug_scores.append(bbox_outputs[1]) if len(bbox_outputs) >= 3: aug_labels.append(bbox_outputs[2]) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = self.merge_aug_bboxes( aug_bboxes, aug_scores, img_metas) merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None if merged_bboxes.numel() == 0: det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1) return [ (det_bboxes, merged_labels), ] det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores, merged_labels, self.test_cfg.nms) det_bboxes = det_bboxes[:self.test_cfg.max_per_img] det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img] if rescale: _det_bboxes = det_bboxes else: _det_bboxes = det_bboxes.clone() _det_bboxes[:, :4] *= det_bboxes.new_tensor( img_metas[0][0]['scale_factor']) return [ (_det_bboxes, det_labels), ] def simple_test_rpn(self, x, img_metas): """Test without augmentation, only for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): Meta info of each image. Returns: list[Tensor]: Proposals of each image, each item has shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). """ rpn_outs = self(x) proposal_list = self.get_bboxes(*rpn_outs, img_metas=img_metas) return proposal_list def aug_test_rpn(self, feats, img_metas): """Test with augmentation for only for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): Meta info of each image. Returns: list[Tensor]: Proposals of each image, each item has shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). 
""" samples_per_gpu = len(img_metas[0]) aug_proposals = [[] for _ in range(samples_per_gpu)] for x, img_meta in zip(feats, img_metas): proposal_list = self.simple_test_rpn(x, img_meta) for i, proposals in enumerate(proposal_list): aug_proposals[i].append(proposals) # reorganize the order of 'img_metas' to match the dimensions # of 'aug_proposals' aug_img_metas = [] for i in range(samples_per_gpu): aug_img_meta = [] for j in range(len(img_metas)): aug_img_meta.append(img_metas[j][i]) aug_img_metas.append(aug_img_meta) # after merging, proposals will be rescaled to the original image size merged_proposals = [ merge_aug_proposals(proposals, aug_img_meta, self.test_cfg) for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas) ] return merged_proposals if sys.version_info >= (3, 7): async def async_simple_test_rpn(self, x, img_metas): sleep_interval = self.test_cfg.pop('async_sleep_interval', 0.025) async with completed( __name__, 'rpn_head_forward', sleep_interval=sleep_interval): rpn_outs = self(x) proposal_list = self.get_bboxes(*rpn_outs, img_metas=img_metas) return proposal_list def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas): """Merge augmented detection bboxes and scores. Args: aug_bboxes (list[Tensor]): shape (n, 4*#class) aug_scores (list[Tensor] or None): shape (n, #class) img_shapes (list[Tensor]): shape (3, ). Returns: tuple[Tensor]: ``bboxes`` with shape (n,4), where 4 represent (tl_x, tl_y, br_x, br_y) and ``scores`` with shape (n,). """ recovered_bboxes = [] for bboxes, img_info in zip(aug_bboxes, img_metas): img_shape = img_info[0]['img_shape'] scale_factor = img_info[0]['scale_factor'] flip = img_info[0]['flip'] flip_direction = img_info[0]['flip_direction'] bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, flip_direction) recovered_bboxes.append(bboxes) bboxes = torch.cat(recovered_bboxes, dim=0) if aug_scores is None: return bboxes else: scores = torch.cat(aug_scores, dim=0) return bboxes, scores ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/detr_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Conv2d, Linear, build_activation_layer from mmcv.cnn.bricks.transformer import FFN, build_positional_encoding from mmcv.runner import force_fp32 from mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh, build_assigner, build_sampler, multi_apply, reduce_mean) from mmdet.models.utils import build_transformer from ..builder import HEADS, build_loss from .anchor_free_head import AnchorFreeHead @HEADS.register_module() class DETRHead(AnchorFreeHead): """Implements the DETR transformer head. See `paper: End-to-End Object Detection with Transformers `_ for details. Args: num_classes (int): Number of categories excluding the background. in_channels (int): Number of channels in the input feature map. num_query (int): Number of query in Transformer. num_reg_fcs (int, optional): Number of fully-connected layers used in `FFN`, which is then used for the regression head. Default 2. transformer (obj:`mmcv.ConfigDict`|dict): Config for transformer. Default: None. sync_cls_avg_factor (bool): Whether to sync the avg_factor of all ranks. Default to False. positional_encoding (obj:`mmcv.ConfigDict`|dict): Config for position encoding. loss_cls (obj:`mmcv.ConfigDict`|dict): Config of the classification loss. Default `CrossEntropyLoss`. 
loss_bbox (obj:`mmcv.ConfigDict`|dict): Config of the regression loss. Default `L1Loss`. loss_iou (obj:`mmcv.ConfigDict`|dict): Config of the regression iou loss. Default `GIoULoss`. tran_cfg (obj:`mmcv.ConfigDict`|dict): Training config of transformer head. test_cfg (obj:`mmcv.ConfigDict`|dict): Testing config of transformer head. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ _version = 2 def __init__(self, num_classes, in_channels, num_query=100, num_reg_fcs=2, transformer=None, sync_cls_avg_factor=False, positional_encoding=dict( type='SinePositionalEncoding', num_feats=128, normalize=True), loss_cls=dict( type='CrossEntropyLoss', bg_cls_weight=0.1, use_sigmoid=False, loss_weight=1.0, class_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=5.0), loss_iou=dict(type='GIoULoss', loss_weight=2.0), train_cfg=dict( assigner=dict( type='HungarianAssigner', cls_cost=dict(type='ClassificationCost', weight=1.), reg_cost=dict(type='BBoxL1Cost', weight=5.0), iou_cost=dict( type='IoUCost', iou_mode='giou', weight=2.0))), test_cfg=dict(max_per_img=100), init_cfg=None, **kwargs): # NOTE here use `AnchorFreeHead` instead of `TransformerHead`, # since it brings inconvenience when the initialization of # `AnchorFreeHead` is called. super(AnchorFreeHead, self).__init__(init_cfg) self.bg_cls_weight = 0 self.sync_cls_avg_factor = sync_cls_avg_factor class_weight = loss_cls.get('class_weight', None) if class_weight is not None and (self.__class__ is DETRHead): assert isinstance(class_weight, float), 'Expected ' \ 'class_weight to have type float. Found ' \ f'{type(class_weight)}.' # NOTE following the official DETR rep0, bg_cls_weight means # relative classification weight of the no-object class. bg_cls_weight = loss_cls.get('bg_cls_weight', class_weight) assert isinstance(bg_cls_weight, float), 'Expected ' \ 'bg_cls_weight to have type float. Found ' \ f'{type(bg_cls_weight)}.' class_weight = torch.ones(num_classes + 1) * class_weight # set background class as the last indice class_weight[num_classes] = bg_cls_weight loss_cls.update({'class_weight': class_weight}) if 'bg_cls_weight' in loss_cls: loss_cls.pop('bg_cls_weight') self.bg_cls_weight = bg_cls_weight if train_cfg: assert 'assigner' in train_cfg, 'assigner should be provided '\ 'when train_cfg is set.' assigner = train_cfg['assigner'] assert loss_cls['loss_weight'] == assigner['cls_cost']['weight'], \ 'The classification weight for loss and matcher should be' \ 'exactly the same.' assert loss_bbox['loss_weight'] == assigner['reg_cost'][ 'weight'], 'The regression L1 weight for loss and matcher ' \ 'should be exactly the same.' assert loss_iou['loss_weight'] == assigner['iou_cost']['weight'], \ 'The regression iou weight for loss and matcher should be' \ 'exactly the same.' 
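# The asserts above exist because the Hungarian assigner ranks
# query-to-gt pairs with the same classification / L1 / IoU costs that
# the losses use; keeping the weights identical ensures the matching
# minimizes the same objective that is optimized afterwards.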
self.assigner = build_assigner(assigner) # DETR sampling=False, so use PseudoSampler sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.num_query = num_query self.num_classes = num_classes self.in_channels = in_channels self.num_reg_fcs = num_reg_fcs self.train_cfg = train_cfg self.test_cfg = test_cfg self.fp16_enabled = False self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) self.loss_iou = build_loss(loss_iou) if self.loss_cls.use_sigmoid: self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 self.act_cfg = transformer.get('act_cfg', dict(type='ReLU', inplace=True)) self.activate = build_activation_layer(self.act_cfg) self.positional_encoding = build_positional_encoding( positional_encoding) self.transformer = build_transformer(transformer) self.embed_dims = self.transformer.embed_dims assert 'num_feats' in positional_encoding num_feats = positional_encoding['num_feats'] assert num_feats * 2 == self.embed_dims, 'embed_dims should' \ f' be exactly 2 times of num_feats. Found {self.embed_dims}' \ f' and {num_feats}.' self._init_layers() def _init_layers(self): """Initialize layers of the transformer head.""" self.input_proj = Conv2d( self.in_channels, self.embed_dims, kernel_size=1) self.fc_cls = Linear(self.embed_dims, self.cls_out_channels) self.reg_ffn = FFN( self.embed_dims, self.embed_dims, self.num_reg_fcs, self.act_cfg, dropout=0.0, add_residual=False) self.fc_reg = Linear(self.embed_dims, 4) self.query_embedding = nn.Embedding(self.num_query, self.embed_dims) def init_weights(self): """Initialize weights of the transformer head.""" # The initialization for transformer is important self.transformer.init_weights() def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): """load checkpoints.""" # NOTE here use `AnchorFreeHead` instead of `TransformerHead`, # since `AnchorFreeHead._load_from_state_dict` should not be # called here. Invoking the default `Module._load_from_state_dict` # is enough. # Names of some parameters in has been changed. version = local_metadata.get('version', None) if (version is None or version < 2) and self.__class__ is DETRHead: convert_dict = { '.self_attn.': '.attentions.0.', '.ffn.': '.ffns.0.', '.multihead_attn.': '.attentions.1.', '.decoder.norm.': '.decoder.post_norm.' } state_dict_keys = list(state_dict.keys()) for k in state_dict_keys: for ori_key, convert_key in convert_dict.items(): if ori_key in k: convert_key = k.replace(ori_key, convert_key) state_dict[convert_key] = state_dict[k] del state_dict[k] super(AnchorFreeHead, self)._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def forward(self, feats, img_metas): """Forward function. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. Returns: tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels. - all_cls_scores_list (list[Tensor]): Classification scores \ for each scale level. Each is a 4D-tensor with shape \ [nb_dec, bs, num_query, cls_out_channels]. Note \ `cls_out_channels` should includes background. - all_bbox_preds_list (list[Tensor]): Sigmoid regression \ outputs for each scale level. Each is a 4D-tensor with \ normalized coordinate format (cx, cy, w, h) and shape \ [nb_dec, bs, num_query, 4]. 
""" num_levels = len(feats) img_metas_list = [img_metas for _ in range(num_levels)] return multi_apply(self.forward_single, feats, img_metas_list) def forward_single(self, x, img_metas): """"Forward function for a single feature level. Args: x (Tensor): Input feature from backbone's single stage, shape [bs, c, h, w]. img_metas (list[dict]): List of image information. Returns: all_cls_scores (Tensor): Outputs from the classification head, shape [nb_dec, bs, num_query, cls_out_channels]. Note cls_out_channels should includes background. all_bbox_preds (Tensor): Sigmoid outputs from the regression head with normalized coordinate format (cx, cy, w, h). Shape [nb_dec, bs, num_query, 4]. """ # construct binary masks which used for the transformer. # NOTE following the official DETR repo, non-zero values representing # ignored positions, while zero values means valid positions. batch_size = x.size(0) input_img_h, input_img_w = img_metas[0]['batch_input_shape'] masks = x.new_ones((batch_size, input_img_h, input_img_w)) for img_id in range(batch_size): img_h, img_w, _ = img_metas[img_id]['img_shape'] masks[img_id, :img_h, :img_w] = 0 x = self.input_proj(x) # interpolate masks to have the same spatial shape with x masks = F.interpolate( masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1) # position encoding pos_embed = self.positional_encoding(masks) # [bs, embed_dim, h, w] # outs_dec: [nb_dec, bs, num_query, embed_dim] outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight, pos_embed) all_cls_scores = self.fc_cls(outs_dec) all_bbox_preds = self.fc_reg(self.activate( self.reg_ffn(outs_dec))).sigmoid() return all_cls_scores, all_bbox_preds @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) def loss(self, all_cls_scores_list, all_bbox_preds_list, gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore=None): """"Loss function. Only outputs from the last feature level are used for computing losses by default. Args: all_cls_scores_list (list[Tensor]): Classification outputs for each feature level. Each is a 4D-tensor with shape [nb_dec, bs, num_query, cls_out_channels]. all_bbox_preds_list (list[Tensor]): Sigmoid regression outputs for each feature level. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and shape [nb_dec, bs, num_query, 4]. gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels_list (list[Tensor]): Ground truth class indices for each image with shape (num_gts, ). img_metas (list[dict]): List of image meta information. gt_bboxes_ignore (list[Tensor], optional): Bounding boxes which can be ignored for each image. Default None. Returns: dict[str, Tensor]: A dictionary of loss components. """ # NOTE defaultly only the outputs from the last feature scale is used. all_cls_scores = all_cls_scores_list[-1] all_bbox_preds = all_bbox_preds_list[-1] assert gt_bboxes_ignore is None, \ 'Only supports for gt_bboxes_ignore setting to None.' 
num_dec_layers = len(all_cls_scores) all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] all_gt_bboxes_ignore_list = [ gt_bboxes_ignore for _ in range(num_dec_layers) ] img_metas_list = [img_metas for _ in range(num_dec_layers)] losses_cls, losses_bbox, losses_iou = multi_apply( self.loss_single, all_cls_scores, all_bbox_preds, all_gt_bboxes_list, all_gt_labels_list, img_metas_list, all_gt_bboxes_ignore_list) loss_dict = dict() # loss from the last decoder layer loss_dict['loss_cls'] = losses_cls[-1] loss_dict['loss_bbox'] = losses_bbox[-1] loss_dict['loss_iou'] = losses_iou[-1] # loss from other decoder layers num_dec_layer = 0 for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1], losses_bbox[:-1], losses_iou[:-1]): loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i num_dec_layer += 1 return loss_dict def loss_single(self, cls_scores, bbox_preds, gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list=None): """"Loss function for outputs from a single decoder layer of a single feature level. Args: cls_scores (Tensor): Box score logits from a single decoder layer for all images. Shape [bs, num_query, cls_out_channels]. bbox_preds (Tensor): Sigmoid outputs from a single decoder layer for all images, with normalized coordinate (cx, cy, w, h) and shape [bs, num_query, 4]. gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels_list (list[Tensor]): Ground truth class indices for each image with shape (num_gts, ). img_metas (list[dict]): List of image meta information. gt_bboxes_ignore_list (list[Tensor], optional): Bounding boxes which can be ignored for each image. Default None. Returns: dict[str, Tensor]: A dictionary of loss components for outputs from a single decoder layer. 
""" num_imgs = cls_scores.size(0) cls_scores_list = [cls_scores[i] for i in range(num_imgs)] bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)] cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list, gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list) (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets labels = torch.cat(labels_list, 0) label_weights = torch.cat(label_weights_list, 0) bbox_targets = torch.cat(bbox_targets_list, 0) bbox_weights = torch.cat(bbox_weights_list, 0) # classification loss cls_scores = cls_scores.reshape(-1, self.cls_out_channels) # construct weighted avg_factor to match with the official DETR repo cls_avg_factor = num_total_pos * 1.0 + \ num_total_neg * self.bg_cls_weight if self.sync_cls_avg_factor: cls_avg_factor = reduce_mean( cls_scores.new_tensor([cls_avg_factor])) cls_avg_factor = max(cls_avg_factor, 1) loss_cls = self.loss_cls( cls_scores, labels, label_weights, avg_factor=cls_avg_factor) # Compute the average number of gt boxes across all gpus, for # normalization purposes num_total_pos = loss_cls.new_tensor([num_total_pos]) num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() # construct factors used for rescale bboxes factors = [] for img_meta, bbox_pred in zip(img_metas, bbox_preds): img_h, img_w, _ = img_meta['img_shape'] factor = bbox_pred.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0).repeat( bbox_pred.size(0), 1) factors.append(factor) factors = torch.cat(factors, 0) # DETR regress the relative position of boxes (cxcywh) in the image, # thus the learning target is normalized by the image size. So here # we need to re-scale them for calculating IoU loss bbox_preds = bbox_preds.reshape(-1, 4) bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors # regression IoU loss, defaultly GIoU loss loss_iou = self.loss_iou( bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos) # regression L1 loss loss_bbox = self.loss_bbox( bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos) return loss_cls, loss_bbox, loss_iou def get_targets(self, cls_scores_list, bbox_preds_list, gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list=None): """"Compute regression and classification targets for a batch image. Outputs from a single decoder layer of a single feature level are used. Args: cls_scores_list (list[Tensor]): Box score logits from a single decoder layer for each image with shape [num_query, cls_out_channels]. bbox_preds_list (list[Tensor]): Sigmoid outputs from a single decoder layer for each image, with normalized coordinate (cx, cy, w, h) and shape [num_query, 4]. gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels_list (list[Tensor]): Ground truth class indices for each image with shape (num_gts, ). img_metas (list[dict]): List of image meta information. gt_bboxes_ignore_list (list[Tensor], optional): Bounding boxes which can be ignored for each image. Default None. Returns: tuple: a tuple containing the following targets. - labels_list (list[Tensor]): Labels for all images. - label_weights_list (list[Tensor]): Label weights for all \ images. - bbox_targets_list (list[Tensor]): BBox targets for all \ images. - bbox_weights_list (list[Tensor]): BBox weights for all \ images. - num_total_pos (int): Number of positive samples in all \ images. 
- num_total_neg (int): Number of negative samples in all \ images. """ assert gt_bboxes_ignore_list is None, \ 'Only supports for gt_bboxes_ignore setting to None.' num_imgs = len(cls_scores_list) gt_bboxes_ignore_list = [ gt_bboxes_ignore_list for _ in range(num_imgs) ] (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply( self._get_target_single, cls_scores_list, bbox_preds_list, gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list) num_total_pos = sum((inds.numel() for inds in pos_inds_list)) num_total_neg = sum((inds.numel() for inds in neg_inds_list)) return (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) def _get_target_single(self, cls_score, bbox_pred, gt_bboxes, gt_labels, img_meta, gt_bboxes_ignore=None): """"Compute regression and classification targets for one image. Outputs from a single decoder layer of a single feature level are used. Args: cls_score (Tensor): Box score logits from a single decoder layer for one image. Shape [num_query, cls_out_channels]. bbox_pred (Tensor): Sigmoid outputs from a single decoder layer for one image, with normalized coordinate (cx, cy, w, h) and shape [num_query, 4]. gt_bboxes (Tensor): Ground truth bboxes for one image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (Tensor): Ground truth class indices for one image with shape (num_gts, ). img_meta (dict): Meta information for one image. gt_bboxes_ignore (Tensor, optional): Bounding boxes which can be ignored. Default None. Returns: tuple[Tensor]: a tuple containing the following for one image. - labels (Tensor): Labels of each image. - label_weights (Tensor]): Label weights of each image. - bbox_targets (Tensor): BBox targets of each image. - bbox_weights (Tensor): BBox weights of each image. - pos_inds (Tensor): Sampled positive indices for each image. - neg_inds (Tensor): Sampled negative indices for each image. """ num_bboxes = bbox_pred.size(0) # assigner and sampler assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes, gt_labels, img_meta, gt_bboxes_ignore) sampling_result = self.sampler.sample(assign_result, bbox_pred, gt_bboxes) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds # label targets labels = gt_bboxes.new_full((num_bboxes, ), self.num_classes, dtype=torch.long) labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] label_weights = gt_bboxes.new_ones(num_bboxes) # bbox targets bbox_targets = torch.zeros_like(bbox_pred) bbox_weights = torch.zeros_like(bbox_pred) bbox_weights[pos_inds] = 1.0 img_h, img_w, _ = img_meta['img_shape'] # DETR regress the relative position of boxes (cxcywh) in the image. # Thus the learning target should be normalized by the image size, also # the box format should be converted from defaultly x1y1x2y2 to cxcywh. factor = bbox_pred.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0) pos_gt_bboxes_normalized = sampling_result.pos_gt_bboxes / factor pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized) bbox_targets[pos_inds] = pos_gt_bboxes_targets return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) # over-write because img_metas are needed as inputs for bbox_head. def forward_train(self, x, img_metas, gt_bboxes, gt_labels=None, gt_bboxes_ignore=None, proposal_cfg=None, **kwargs): """Forward function for training mode. Args: x (list[Tensor]): Features from backbone. 
img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). proposal_cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert proposal_cfg is None, '"proposal_cfg" must be None' outs = self(x, img_metas) if gt_labels is None: loss_inputs = outs + (gt_bboxes, img_metas) else: loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) return losses @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) def get_bboxes(self, all_cls_scores_list, all_bbox_preds_list, img_metas, rescale=False): """Transform network outputs for a batch into bbox predictions. Args: all_cls_scores_list (list[Tensor]): Classification outputs for each feature level. Each is a 4D-tensor with shape [nb_dec, bs, num_query, cls_out_channels]. all_bbox_preds_list (list[Tensor]): Sigmoid regression outputs for each feature level. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and shape [nb_dec, bs, num_query, 4]. img_metas (list[dict]): Meta information of each image. rescale (bool, optional): If True, return boxes in original image space. Default False. Returns: list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \ The first item is an (n, 5) tensor, where the first 4 columns \ are bounding box positions (tl_x, tl_y, br_x, br_y) and the \ 5-th column is a score between 0 and 1. The second item is a \ (n,) tensor where each item is the predicted class label of \ the corresponding box. """ # NOTE defaultly only using outputs from the last feature level, # and only the outputs from the last decoder layer is used. cls_scores = all_cls_scores_list[-1][-1] bbox_preds = all_bbox_preds_list[-1][-1] result_list = [] for img_id in range(len(img_metas)): cls_score = cls_scores[img_id] bbox_pred = bbox_preds[img_id] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] proposals = self._get_bboxes_single(cls_score, bbox_pred, img_shape, scale_factor, rescale) result_list.append(proposals) return result_list def _get_bboxes_single(self, cls_score, bbox_pred, img_shape, scale_factor, rescale=False): """Transform outputs from the last decoder layer into bbox predictions for each image. Args: cls_score (Tensor): Box score logits from the last decoder layer for each image. Shape [num_query, cls_out_channels]. bbox_pred (Tensor): Sigmoid outputs from the last decoder layer for each image, with coordinate format (cx, cy, w, h) and shape [num_query, 4]. img_shape (tuple[int]): Shape of input image, (height, width, 3). scale_factor (ndarray, optional): Scale factor of the image arange as (w_scale, h_scale, w_scale, h_scale). rescale (bool, optional): If True, return boxes in original image space. Default False. Returns: tuple[Tensor]: Results of detected bboxes and labels. - det_bboxes: Predicted bboxes with shape [num_query, 5], \ where the first 4 columns are bounding box positions \ (tl_x, tl_y, br_x, br_y) and the 5-th column are scores \ between 0 and 1. - det_labels: Predicted labels of the corresponding box with \ shape [num_query]. 
""" assert len(cls_score) == len(bbox_pred) max_per_img = self.test_cfg.get('max_per_img', self.num_query) # exclude background if self.loss_cls.use_sigmoid: cls_score = cls_score.sigmoid() scores, indexes = cls_score.view(-1).topk(max_per_img) det_labels = indexes % self.num_classes bbox_index = indexes // self.num_classes bbox_pred = bbox_pred[bbox_index] else: scores, det_labels = F.softmax(cls_score, dim=-1)[..., :-1].max(-1) scores, bbox_index = scores.topk(max_per_img) bbox_pred = bbox_pred[bbox_index] det_labels = det_labels[bbox_index] det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred) det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1] det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0] det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1]) det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0]) if rescale: det_bboxes /= det_bboxes.new_tensor(scale_factor) det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(1)), -1) return det_bboxes, det_labels def simple_test_bboxes(self, feats, img_metas, rescale=False): """Test det bboxes without test-time augmentation. Args: feats (tuple[torch.Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is ``bboxes`` with shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). The shape of the second tensor in the tuple is ``labels`` with shape (n,) """ # forward of this head requires img_metas outs = self.forward(feats, img_metas) results_list = self.get_bboxes(*outs, img_metas, rescale=rescale) return results_list def forward_onnx(self, feats, img_metas): """Forward function for exporting to ONNX. Over-write `forward` because: `masks` is directly created with zero (valid position tag) and has the same spatial size as `x`. Thus the construction of `masks` is different from that in `forward`. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. Returns: tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels. - all_cls_scores_list (list[Tensor]): Classification scores \ for each scale level. Each is a 4D-tensor with shape \ [nb_dec, bs, num_query, cls_out_channels]. Note \ `cls_out_channels` should includes background. - all_bbox_preds_list (list[Tensor]): Sigmoid regression \ outputs for each scale level. Each is a 4D-tensor with \ normalized coordinate format (cx, cy, w, h) and shape \ [nb_dec, bs, num_query, 4]. """ num_levels = len(feats) img_metas_list = [img_metas for _ in range(num_levels)] return multi_apply(self.forward_single_onnx, feats, img_metas_list) def forward_single_onnx(self, x, img_metas): """"Forward function for a single feature level with ONNX exportation. Args: x (Tensor): Input feature from backbone's single stage, shape [bs, c, h, w]. img_metas (list[dict]): List of image information. Returns: all_cls_scores (Tensor): Outputs from the classification head, shape [nb_dec, bs, num_query, cls_out_channels]. Note cls_out_channels should includes background. all_bbox_preds (Tensor): Sigmoid outputs from the regression head with normalized coordinate format (cx, cy, w, h). Shape [nb_dec, bs, num_query, 4]. """ # Note `img_shape` is not dynamically traceable to ONNX, # since the related augmentation was done with numpy under # CPU. 
Thus `masks` is directly created with zeros (valid tag) # and the same spatial shape as `x`. # The difference between torch and exported ONNX model may be # ignored, since the same performance is achieved (e.g. # 40.1 vs 40.1 for DETR) batch_size = x.size(0) h, w = x.size()[-2:] masks = x.new_zeros((batch_size, h, w)) # [B,h,w] x = self.input_proj(x) # interpolate masks to have the same spatial shape with x masks = F.interpolate( masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1) pos_embed = self.positional_encoding(masks) outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight, pos_embed) all_cls_scores = self.fc_cls(outs_dec) all_bbox_preds = self.fc_reg(self.activate( self.reg_ffn(outs_dec))).sigmoid() return all_cls_scores, all_bbox_preds def onnx_export(self, all_cls_scores_list, all_bbox_preds_list, img_metas): """Transform network outputs into bbox predictions, with ONNX exportation. Args: all_cls_scores_list (list[Tensor]): Classification outputs for each feature level. Each is a 4D-tensor with shape [nb_dec, bs, num_query, cls_out_channels]. all_bbox_preds_list (list[Tensor]): Sigmoid regression outputs for each feature level. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and shape [nb_dec, bs, num_query, 4]. img_metas (list[dict]): Meta information of each image. Returns: tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] and class labels of shape [N, num_det]. """ assert len(img_metas) == 1, \ 'Only support one input image while in exporting to ONNX' cls_scores = all_cls_scores_list[-1][-1] bbox_preds = all_bbox_preds_list[-1][-1] # Note `img_shape` is not dynamically traceable to ONNX, # here `img_shape_for_onnx` (padded shape of image tensor) # is used. img_shape = img_metas[0]['img_shape_for_onnx'] max_per_img = self.test_cfg.get('max_per_img', self.num_query) batch_size = cls_scores.size(0) # `batch_index_offset` is used for the gather of concatenated tensor batch_index_offset = torch.arange(batch_size).to( cls_scores.device) * max_per_img batch_index_offset = batch_index_offset.unsqueeze(1).expand( batch_size, max_per_img) # supports dynamical batch inference if self.loss_cls.use_sigmoid: cls_scores = cls_scores.sigmoid() scores, indexes = cls_scores.view(batch_size, -1).topk( max_per_img, dim=1) det_labels = indexes % self.num_classes bbox_index = indexes // self.num_classes bbox_index = (bbox_index + batch_index_offset).view(-1) bbox_preds = bbox_preds.view(-1, 4)[bbox_index] bbox_preds = bbox_preds.view(batch_size, -1, 4) else: scores, det_labels = F.softmax( cls_scores, dim=-1)[..., :-1].max(-1) scores, bbox_index = scores.topk(max_per_img, dim=1) bbox_index = (bbox_index + batch_index_offset).view(-1) bbox_preds = bbox_preds.view(-1, 4)[bbox_index] det_labels = det_labels.view(-1)[bbox_index] bbox_preds = bbox_preds.view(batch_size, -1, 4) det_labels = det_labels.view(batch_size, -1) det_bboxes = bbox_cxcywh_to_xyxy(bbox_preds) # use `img_shape_tensor` for dynamically exporting to ONNX img_shape_tensor = img_shape.flip(0).repeat(2) # [w,h,w,h] img_shape_tensor = img_shape_tensor.unsqueeze(0).unsqueeze(0).expand( batch_size, det_bboxes.size(1), 4) det_bboxes = det_bboxes * img_shape_tensor # dynamically clip bboxes x1, y1, x2, y2 = det_bboxes.split((1, 1, 1, 1), dim=-1) from mmdet.core.export import dynamic_clip_for_onnx x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, img_shape) det_bboxes = torch.cat([x1, y1, x2, y2], dim=-1) det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(-1)), -1) return det_bboxes, 
det_labels ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/embedding_rpn_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.runner import BaseModule from mmdet.models.builder import HEADS from ...core import bbox_cxcywh_to_xyxy @HEADS.register_module() class EmbeddingRPNHead(BaseModule): """RPNHead in the `Sparse R-CNN `_ . Unlike traditional RPNHead, this module does not need FPN input, but just decode `init_proposal_bboxes` and expand the first dimension of `init_proposal_bboxes` and `init_proposal_features` to the batch_size. Args: num_proposals (int): Number of init_proposals. Default 100. proposal_feature_channel (int): Channel number of init_proposal_feature. Defaults to 256. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, num_proposals=100, proposal_feature_channel=256, init_cfg=None, **kwargs): assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' super(EmbeddingRPNHead, self).__init__(init_cfg) self.num_proposals = num_proposals self.proposal_feature_channel = proposal_feature_channel self._init_layers() def _init_layers(self): """Initialize a sparse set of proposal boxes and proposal features.""" self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4) self.init_proposal_features = nn.Embedding( self.num_proposals, self.proposal_feature_channel) def init_weights(self): """Initialize the init_proposal_bboxes as normalized. [c_x, c_y, w, h], and we initialize it to the size of the entire image. """ super(EmbeddingRPNHead, self).init_weights() nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5) nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1) def _decode_init_proposals(self, imgs, img_metas): """Decode init_proposal_bboxes according to the size of images and expand dimension of init_proposal_features to batch_size. Args: imgs (list[Tensor]): List of FPN features. img_metas (list[dict]): List of meta-information of images. Need the img_shape to decode the init_proposals. Returns: Tuple(Tensor): - proposals (Tensor): Decoded proposal bboxes, has shape (batch_size, num_proposals, 4). - init_proposal_features (Tensor): Expanded proposal features, has shape (batch_size, num_proposals, proposal_feature_channel). - imgs_whwh (Tensor): Tensor with shape (batch_size, 4), the dimension means [img_width, img_height, img_width, img_height]. """ proposals = self.init_proposal_bboxes.weight.clone() proposals = bbox_cxcywh_to_xyxy(proposals) num_imgs = len(imgs[0]) imgs_whwh = [] for meta in img_metas: h, w, _ = meta['img_shape'] imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]])) imgs_whwh = torch.cat(imgs_whwh, dim=0) imgs_whwh = imgs_whwh[:, None, :] # imgs_whwh has shape (batch_size, 1, 4) # The shape of proposals change from (num_proposals, 4) # to (batch_size ,num_proposals, 4) proposals = proposals * imgs_whwh init_proposal_features = self.init_proposal_features.weight.clone() init_proposal_features = init_proposal_features[None].expand( num_imgs, *init_proposal_features.size()) return proposals, init_proposal_features, imgs_whwh def forward_dummy(self, img, img_metas): """Dummy forward function. Used in flops calculation. 
""" return self._decode_init_proposals(img, img_metas) def forward_train(self, img, img_metas): """Forward function in training stage.""" return self._decode_init_proposals(img, img_metas) def simple_test_rpn(self, img, img_metas): """Forward function in testing stage.""" return self._decode_init_proposals(img, img_metas) def simple_test(self, img, img_metas): """Forward function in testing stage.""" raise NotImplementedError def aug_test_rpn(self, feats, img_metas): raise NotImplementedError( 'EmbeddingRPNHead does not support test-time augmentation') ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/fcos_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings import torch import torch.nn as nn from mmcv.cnn import Scale from mmcv.runner import force_fp32 from mmdet.core import multi_apply, reduce_mean from ..builder import HEADS, build_loss from .anchor_free_head import AnchorFreeHead INF = 1e8 @HEADS.register_module() class FCOSHead(AnchorFreeHead): """Anchor-free head used in `FCOS `_. The FCOS head does not use anchor boxes. Instead bounding boxes are predicted at each pixel and a centerness measure is used to suppress low-quality predictions. Here norm_on_bbox, centerness_on_reg, dcn_on_last_conv are training tricks used in official repo, which will bring remarkable mAP gains of up to 4.9. Please see https://github.com/tianzhi0549/FCOS for more detail. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. strides (list[int] | list[tuple[int, int]]): Strides of points in multiple feature levels. Default: (4, 8, 16, 32, 64). regress_ranges (tuple[tuple[int, int]]): Regress range of multiple level points. center_sampling (bool): If true, use center sampling. Default: False. center_sample_radius (float): Radius of center sampling. Default: 1.5. norm_on_bbox (bool): If true, normalize the regression targets with FPN strides. Default: False. centerness_on_reg (bool): If true, position centerness on the regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042. Default: False. conv_bias (bool | str): If specified as `auto`, it will be decided by the norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise False. Default: "auto". loss_cls (dict): Config of classification loss. loss_bbox (dict): Config of localization loss. loss_centerness (dict): Config of centerness loss. norm_cfg (dict): dictionary to construct and config norm layer. Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). init_cfg (dict or list[dict], optional): Initialization config dict. 
Example: >>> self = FCOSHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_score, bbox_pred, centerness = self.forward(feats) >>> assert len(cls_score) == len(self.scales) """ # noqa: E501 def __init__(self, num_classes, in_channels, regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512), (512, INF)), center_sampling=False, center_sample_radius=1.5, norm_on_bbox=False, centerness_on_reg=False, loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='IoULoss', loss_weight=1.0), loss_centerness=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='conv_cls', std=0.01, bias_prob=0.01)), **kwargs): self.regress_ranges = regress_ranges self.center_sampling = center_sampling self.center_sample_radius = center_sample_radius self.norm_on_bbox = norm_on_bbox self.centerness_on_reg = centerness_on_reg super().__init__( num_classes, in_channels, loss_cls=loss_cls, loss_bbox=loss_bbox, norm_cfg=norm_cfg, init_cfg=init_cfg, **kwargs) self.loss_centerness = build_loss(loss_centerness) def _init_layers(self): """Initialize layers of the head.""" super()._init_layers() self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1) self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: cls_scores (list[Tensor]): Box scores for each scale level, \ each is a 4D-tensor, the channel number is \ num_points * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each \ scale level, each is a 4D-tensor, the channel number is \ num_points * 4. centernesses (list[Tensor]): centerness for each scale level, \ each is a 4D-tensor, the channel number is num_points * 1. """ return multi_apply(self.forward_single, feats, self.scales, self.strides) def forward_single(self, x, scale, stride): """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. stride (int): The corresponding stride for feature maps, only used to normalize the bbox prediction when self.norm_on_bbox is True. Returns: tuple: scores for each class, bbox predictions and centerness \ predictions of input feature maps. """ cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x) if self.centerness_on_reg: centerness = self.conv_centerness(reg_feat) else: centerness = self.conv_centerness(cls_feat) # scale the bbox_pred of different level # float to avoid overflow when enabling FP16 bbox_pred = scale(bbox_pred).float() if self.norm_on_bbox: # bbox_pred needed for gradient computation has been modified # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace # F.relu(bbox_pred) with bbox_pred.clamp(min=0) bbox_pred = bbox_pred.clamp(min=0) if not self.training: bbox_pred *= stride else: bbox_pred = bbox_pred.exp() return cls_score, bbox_pred, centerness @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) def loss(self, cls_scores, bbox_preds, centernesses, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute loss of the head. 
Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_points * 4. centernesses (list[Tensor]): centerness for each scale level, each is a 4D-tensor, the channel number is num_points * 1. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert len(cls_scores) == len(bbox_preds) == len(centernesses) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] all_level_points = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) labels, bbox_targets = self.get_targets(all_level_points, gt_bboxes, gt_labels) num_imgs = cls_scores[0].size(0) # flatten cls_scores, bbox_preds and centerness flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds ] flatten_centerness = [ centerness.permute(0, 2, 3, 1).reshape(-1) for centerness in centernesses ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) flatten_centerness = torch.cat(flatten_centerness) flatten_labels = torch.cat(labels) flatten_bbox_targets = torch.cat(bbox_targets) # repeat points to align with bbox_preds flatten_points = torch.cat( [points.repeat(num_imgs, 1) for points in all_level_points]) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)).nonzero().reshape(-1) num_pos = torch.tensor( len(pos_inds), dtype=torch.float, device=bbox_preds[0].device) num_pos = max(reduce_mean(num_pos), 1.0) loss_cls = self.loss_cls( flatten_cls_scores, flatten_labels, avg_factor=num_pos) pos_bbox_preds = flatten_bbox_preds[pos_inds] pos_centerness = flatten_centerness[pos_inds] pos_bbox_targets = flatten_bbox_targets[pos_inds] pos_centerness_targets = self.centerness_target(pos_bbox_targets) # centerness weighted iou loss centerness_denorm = max( reduce_mean(pos_centerness_targets.sum().detach()), 1e-6) if len(pos_inds) > 0: pos_points = flatten_points[pos_inds] pos_decoded_bbox_preds = self.bbox_coder.decode( pos_points, pos_bbox_preds) pos_decoded_target_preds = self.bbox_coder.decode( pos_points, pos_bbox_targets) loss_bbox = self.loss_bbox( pos_decoded_bbox_preds, pos_decoded_target_preds, weight=pos_centerness_targets, avg_factor=centerness_denorm) loss_centerness = self.loss_centerness( pos_centerness, pos_centerness_targets, avg_factor=num_pos) else: loss_bbox = pos_bbox_preds.sum() loss_centerness = pos_centerness.sum() return dict( loss_cls=loss_cls, loss_bbox=loss_bbox, loss_centerness=loss_centerness) def get_targets(self, points, gt_bboxes_list, gt_labels_list): """Compute regression, classification and centerness targets for points in multiple images. Args: points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). 
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). gt_labels_list (list[Tensor]): Ground truth labels of each box, each has shape (num_gt,). Returns: tuple: concat_lvl_labels (list[Tensor]): Labels of each level. \ concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \ level. """ assert len(points) == len(self.regress_ranges) num_levels = len(points) # expand regress ranges to align with points expanded_regress_ranges = [ points[i].new_tensor(self.regress_ranges[i])[None].expand_as( points[i]) for i in range(num_levels) ] # concat all levels points and regress ranges concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) concat_points = torch.cat(points, dim=0) # the number of points per img, per lvl num_points = [center.size(0) for center in points] # get labels and bbox_targets of each image labels_list, bbox_targets_list = multi_apply( self._get_target_single, gt_bboxes_list, gt_labels_list, points=concat_points, regress_ranges=concat_regress_ranges, num_points_per_lvl=num_points) # split to per img, per level labels_list = [labels.split(num_points, 0) for labels in labels_list] bbox_targets_list = [ bbox_targets.split(num_points, 0) for bbox_targets in bbox_targets_list ] # concat per level image concat_lvl_labels = [] concat_lvl_bbox_targets = [] for i in range(num_levels): concat_lvl_labels.append( torch.cat([labels[i] for labels in labels_list])) bbox_targets = torch.cat( [bbox_targets[i] for bbox_targets in bbox_targets_list]) if self.norm_on_bbox: bbox_targets = bbox_targets / self.strides[i] concat_lvl_bbox_targets.append(bbox_targets) return concat_lvl_labels, concat_lvl_bbox_targets def _get_target_single(self, gt_bboxes, gt_labels, points, regress_ranges, num_points_per_lvl): """Compute regression and classification targets for a single image.""" num_points = points.size(0) num_gts = gt_labels.size(0) if num_gts == 0: return gt_labels.new_full((num_points,), self.num_classes), \ gt_bboxes.new_zeros((num_points, 4)) areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * ( gt_bboxes[:, 3] - gt_bboxes[:, 1]) # TODO: figure out why these two are different # areas = areas[None].expand(num_points, num_gts) areas = areas[None].repeat(num_points, 1) regress_ranges = regress_ranges[:, None, :].expand( num_points, num_gts, 2) gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) xs, ys = points[:, 0], points[:, 1] xs = xs[:, None].expand(num_points, num_gts) ys = ys[:, None].expand(num_points, num_gts) left = xs - gt_bboxes[..., 0] right = gt_bboxes[..., 2] - xs top = ys - gt_bboxes[..., 1] bottom = gt_bboxes[..., 3] - ys bbox_targets = torch.stack((left, top, right, bottom), -1) if self.center_sampling: # condition1: inside a `center bbox` radius = self.center_sample_radius center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2 center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2 center_gts = torch.zeros_like(gt_bboxes) stride = center_xs.new_zeros(center_xs.shape) # project the points on current lvl back to the `original` sizes lvl_begin = 0 for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl): lvl_end = lvl_begin + num_points_lvl stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius lvl_begin = lvl_end x_mins = center_xs - stride y_mins = center_ys - stride x_maxs = center_xs + stride y_maxs = center_ys + stride center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0], x_mins, gt_bboxes[..., 0]) center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1], y_mins, gt_bboxes[..., 1]) center_gts[..., 2] = 
torch.where(x_maxs > gt_bboxes[..., 2], gt_bboxes[..., 2], x_maxs) center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3], gt_bboxes[..., 3], y_maxs) cb_dist_left = xs - center_gts[..., 0] cb_dist_right = center_gts[..., 2] - xs cb_dist_top = ys - center_gts[..., 1] cb_dist_bottom = center_gts[..., 3] - ys center_bbox = torch.stack( (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1) inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0 else: # condition1: inside a gt bbox inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 # condition2: limit the regression range for each location max_regress_distance = bbox_targets.max(-1)[0] inside_regress_range = ( (max_regress_distance >= regress_ranges[..., 0]) & (max_regress_distance <= regress_ranges[..., 1])) # if there is still more than one object for a location, # we choose the one with minimal area areas[inside_gt_bbox_mask == 0] = INF areas[inside_regress_range == 0] = INF min_area, min_area_inds = areas.min(dim=1) labels = gt_labels[min_area_inds] labels[min_area == INF] = self.num_classes # set as BG bbox_targets = bbox_targets[range(num_points), min_area_inds] return labels, bbox_targets def centerness_target(self, pos_bbox_targets): """Compute centerness targets. Args: pos_bbox_targets (Tensor): BBox targets of positive bboxes in shape (num_pos, 4) Returns: Tensor: Centerness target. """ # only calculate pos centerness targets, otherwise there may be nan left_right = pos_bbox_targets[:, [0, 2]] top_bottom = pos_bbox_targets[:, [1, 3]] if len(left_right) == 0: centerness_targets = left_right[..., 0] else: centerness_targets = ( left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * ( top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]) return torch.sqrt(centerness_targets) def _get_points_single(self, featmap_size, stride, dtype, device, flatten=False): """Get points according to feature map size. This function will be deprecated soon. """ warnings.warn( '`_get_points_single` in `FCOSHead` will be ' 'deprecated soon, we support a multi level point generator now, ' 'you can get points of a single level feature map ' 'with `self.prior_generator.single_level_grid_priors` ') y, x = super()._get_points_single(featmap_size, stride, dtype, device) points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride), dim=-1) + stride // 2 return points ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/fovea_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved.
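# --- Editor's note (illustrative sketch, not part of the original file).
# FoveaHead below assigns each ground truth to a pyramid level by sqrt(area)
# via `scale_ranges`, then marks only a shrunken central "fovea" of the box
# (controlled by `sigma`) as positive. With made-up numbers:
#
#   import torch
#   gt = torch.tensor([[10., 10., 50., 30.]])   # x1, y1, x2, y2
#   sigma = 0.4
#   half_w = 0.5 * (gt[:, 2] - gt[:, 0])        # 20.0
#   half_h = 0.5 * (gt[:, 3] - gt[:, 1])        # 10.0
#   left = gt[:, 0] + (1 - sigma) * half_w      # 22.0
#   right = gt[:, 0] + (1 + sigma) * half_w     # 38.0
#   top = gt[:, 1] + (1 - sigma) * half_h       # 16.0
#   down = gt[:, 1] + (1 + sigma) * half_h      # 24.0
#   # only locations inside (22, 16, 38, 24) become positives for this box,
#   # mirroring the pos_left/pos_right/pos_top/pos_down computation below
#   # (which additionally rounds and works in stride-normalized coordinates).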
import warnings import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import DeformConv2d from mmcv.runner import BaseModule from mmdet.core import multi_apply from mmdet.core.utils import filter_scores_and_topk from ..builder import HEADS from .anchor_free_head import AnchorFreeHead INF = 1e8 class FeatureAlign(BaseModule): def __init__(self, in_channels, out_channels, kernel_size=3, deform_groups=4, init_cfg=dict( type='Normal', layer='Conv2d', std=0.1, override=dict( type='Normal', name='conv_adaption', std=0.01))): super(FeatureAlign, self).__init__(init_cfg) offset_channels = kernel_size * kernel_size * 2 self.conv_offset = nn.Conv2d( 4, deform_groups * offset_channels, 1, bias=False) self.conv_adaption = DeformConv2d( in_channels, out_channels, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, deform_groups=deform_groups) self.relu = nn.ReLU(inplace=True) def forward(self, x, shape): offset = self.conv_offset(shape) x = self.relu(self.conv_adaption(x, offset)) return x @HEADS.register_module() class FoveaHead(AnchorFreeHead): """FoveaBox: Beyond Anchor-based Object Detector https://arxiv.org/abs/1904.03797 """ def __init__(self, num_classes, in_channels, base_edge_list=(16, 32, 64, 128, 256), scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128, 512)), sigma=0.4, with_deform=False, deform_groups=4, init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='conv_cls', std=0.01, bias_prob=0.01)), **kwargs): self.base_edge_list = base_edge_list self.scale_ranges = scale_ranges self.sigma = sigma self.with_deform = with_deform self.deform_groups = deform_groups super().__init__(num_classes, in_channels, init_cfg=init_cfg, **kwargs) def _init_layers(self): # box branch super()._init_reg_convs() self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) # cls branch if not self.with_deform: super()._init_cls_convs() self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) else: self.cls_convs = nn.ModuleList() self.cls_convs.append( ConvModule( self.feat_channels, (self.feat_channels * 4), 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.cls_convs.append( ConvModule((self.feat_channels * 4), (self.feat_channels * 4), 1, stride=1, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.feature_adaption = FeatureAlign( self.feat_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups) self.conv_cls = nn.Conv2d( int(self.feat_channels * 4), self.cls_out_channels, 3, padding=1) def forward_single(self, x): cls_feat = x reg_feat = x for reg_layer in self.reg_convs: reg_feat = reg_layer(reg_feat) bbox_pred = self.conv_reg(reg_feat) if self.with_deform: cls_feat = self.feature_adaption(cls_feat, bbox_pred.exp()) for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) cls_score = self.conv_cls(cls_feat) return cls_score, bbox_pred def loss(self, cls_scores, bbox_preds, gt_bbox_list, gt_label_list, img_metas, gt_bboxes_ignore=None): assert len(cls_scores) == len(bbox_preds) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] points = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) num_imgs = cls_scores[0].size(0) flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for 
bbox_pred in bbox_preds ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) flatten_labels, flatten_bbox_targets = self.get_targets( gt_bbox_list, gt_label_list, featmap_sizes, points) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes pos_inds = ((flatten_labels >= 0) & (flatten_labels < self.num_classes)).nonzero().view(-1) num_pos = len(pos_inds) loss_cls = self.loss_cls( flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs) if num_pos > 0: pos_bbox_preds = flatten_bbox_preds[pos_inds] pos_bbox_targets = flatten_bbox_targets[pos_inds] pos_weights = pos_bbox_targets.new_zeros( pos_bbox_targets.size()) + 1.0 loss_bbox = self.loss_bbox( pos_bbox_preds, pos_bbox_targets, pos_weights, avg_factor=num_pos) else: loss_bbox = torch.tensor( 0, dtype=flatten_bbox_preds.dtype, device=flatten_bbox_preds.device) return dict(loss_cls=loss_cls, loss_bbox=loss_bbox) def get_targets(self, gt_bbox_list, gt_label_list, featmap_sizes, points): label_list, bbox_target_list = multi_apply( self._get_target_single, gt_bbox_list, gt_label_list, featmap_size_list=featmap_sizes, point_list=points) flatten_labels = [ torch.cat([ labels_level_img.flatten() for labels_level_img in labels_level ]) for labels_level in zip(*label_list) ] flatten_bbox_targets = [ torch.cat([ bbox_targets_level_img.reshape(-1, 4) for bbox_targets_level_img in bbox_targets_level ]) for bbox_targets_level in zip(*bbox_target_list) ] flatten_labels = torch.cat(flatten_labels) flatten_bbox_targets = torch.cat(flatten_bbox_targets) return flatten_labels, flatten_bbox_targets def _get_target_single(self, gt_bboxes_raw, gt_labels_raw, featmap_size_list=None, point_list=None): gt_areas = torch.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) * (gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1])) label_list = [] bbox_target_list = [] # for each pyramid, find the cls and box target for base_len, (lower_bound, upper_bound), stride, featmap_size, \ points in zip(self.base_edge_list, self.scale_ranges, self.strides, featmap_size_list, point_list): # FG cat_id: [0, num_classes -1], BG cat_id: num_classes points = points.view(*featmap_size, 2) x, y = points[..., 0], points[..., 1] labels = gt_labels_raw.new_zeros(featmap_size) + self.num_classes bbox_targets = gt_bboxes_raw.new(featmap_size[0], featmap_size[1], 4) + 1 # scale assignment hit_indices = ((gt_areas >= lower_bound) & (gt_areas <= upper_bound)).nonzero().flatten() if len(hit_indices) == 0: label_list.append(labels) bbox_target_list.append(torch.log(bbox_targets)) continue _, hit_index_order = torch.sort(-gt_areas[hit_indices]) hit_indices = hit_indices[hit_index_order] gt_bboxes = gt_bboxes_raw[hit_indices, :] / stride gt_labels = gt_labels_raw[hit_indices] half_w = 0.5 * (gt_bboxes[:, 2] - gt_bboxes[:, 0]) half_h = 0.5 * (gt_bboxes[:, 3] - gt_bboxes[:, 1]) # valid fovea area: left, right, top, down pos_left = torch.ceil( gt_bboxes[:, 0] + (1 - self.sigma) * half_w - 0.5).long(). \ clamp(0, featmap_size[1] - 1) pos_right = torch.floor( gt_bboxes[:, 0] + (1 + self.sigma) * half_w - 0.5).long(). \ clamp(0, featmap_size[1] - 1) pos_top = torch.ceil( gt_bboxes[:, 1] + (1 - self.sigma) * half_h - 0.5).long(). \ clamp(0, featmap_size[0] - 1) pos_down = torch.floor( gt_bboxes[:, 1] + (1 + self.sigma) * half_h - 0.5).long(). 
\ clamp(0, featmap_size[0] - 1) for px1, py1, px2, py2, label, (gt_x1, gt_y1, gt_x2, gt_y2) in \ zip(pos_left, pos_top, pos_right, pos_down, gt_labels, gt_bboxes_raw[hit_indices, :]): labels[py1:py2 + 1, px1:px2 + 1] = label bbox_targets[py1:py2 + 1, px1:px2 + 1, 0] = \ (x[py1:py2 + 1, px1:px2 + 1] - gt_x1) / base_len bbox_targets[py1:py2 + 1, px1:px2 + 1, 1] = \ (y[py1:py2 + 1, px1:px2 + 1] - gt_y1) / base_len bbox_targets[py1:py2 + 1, px1:px2 + 1, 2] = \ (gt_x2 - x[py1:py2 + 1, px1:px2 + 1]) / base_len bbox_targets[py1:py2 + 1, px1:px2 + 1, 3] = \ (gt_y2 - y[py1:py2 + 1, px1:px2 + 1]) / base_len bbox_targets = bbox_targets.clamp(min=1. / 16, max=16.) label_list.append(labels) bbox_target_list.append(torch.log(bbox_targets)) return label_list, bbox_target_list # Same as base_dense_head/_get_bboxes_single except self._bbox_decode def _get_bboxes_single(self, cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors, img_meta, cfg, rescale=False, with_nms=True, **kwargs): """Transform outputs of a single image into bbox predictions. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image. Fovea head does not need this value. mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid, has shape (num_priors, 2). img_meta (dict): Image meta info. cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually with_nms is False is used for aug test. If with_nms is True, then return the following format - det_bboxes (Tensor): Predicted bboxes with shape \ [num_bboxes, 5], where the first 4 columns are bounding \ box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ column are scores between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding \ box with shape [num_bboxes]. """ cfg = self.test_cfg if cfg is None else cfg assert len(cls_score_list) == len(bbox_pred_list) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_labels = [] for level_idx, (cls_score, bbox_pred, stride, base_len, priors) in \ enumerate(zip(cls_score_list, bbox_pred_list, self.strides, self.base_edge_list, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) scores = cls_score.permute(1, 2, 0).reshape( -1, self.cls_out_channels).sigmoid() # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. 
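# --- Editor's note (hedged sketch, not part of the original file).
# `filter_scores_and_topk` below flattens the per-class scores, drops entries
# below `score_thr`, keeps at most `nms_pre` of the rest by score, and gathers
# the matching rows of the tensors passed in the dict. Conceptually
# (simplified, assuming nms_pre > 0):
#
#   flat = scores.reshape(-1)                        # [num_priors * C]
#   keep = (flat > cfg.score_thr).nonzero().squeeze(1)
#   top = flat[keep].topk(min(nms_pre, keep.numel())).indices
#   idxs = keep[top]                                 # indices into flat
#   labels = idxs % scores.size(1)                   # recovered class ids
#   rows = idxs // scores.size(1)                    # rows into bbox_pred/priors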
results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict(bbox_pred=bbox_pred, priors=priors)) scores, labels, _, filtered_results = results bbox_pred = filtered_results['bbox_pred'] priors = filtered_results['priors'] bboxes = self._bbox_decode(priors, bbox_pred, base_len, img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_labels.append(labels) return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes, img_meta['scale_factor'], cfg, rescale, with_nms) def _bbox_decode(self, priors, bbox_pred, base_len, max_shape): bbox_pred = bbox_pred.exp() y = priors[:, 1] x = priors[:, 0] x1 = (x - base_len * bbox_pred[:, 0]). \ clamp(min=0, max=max_shape[1] - 1) y1 = (y - base_len * bbox_pred[:, 1]). \ clamp(min=0, max=max_shape[0] - 1) x2 = (x + base_len * bbox_pred[:, 2]). \ clamp(min=0, max=max_shape[1] - 1) y2 = (y + base_len * bbox_pred[:, 3]). \ clamp(min=0, max=max_shape[0] - 1) decoded_bboxes = torch.stack([x1, y1, x2, y2], -1) return decoded_bboxes def _get_points_single(self, *args, **kwargs): """Get points according to feature map size. This function will be deprecated soon. """ warnings.warn( '`_get_points_single` in `FoveaHead` will be ' 'deprecated soon, we support a multi level point generator now, ' 'you can get points of a single level feature map ' 'with `self.prior_generator.single_level_grid_priors` ') y, x = super()._get_points_single(*args, **kwargs) return y + 0.5, x + 0.5 ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/free_anchor_retina_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn.functional as F from mmdet.core import bbox_overlaps from ..builder import HEADS from .retina_head import RetinaHead EPS = 1e-12 @HEADS.register_module() class FreeAnchorRetinaHead(RetinaHead): """FreeAnchor RetinaHead used in https://arxiv.org/abs/1909.02466. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of conv layers in cls and reg tower. Default: 4. conv_cfg (dict): dictionary to construct and config conv layer. Default: None. norm_cfg (dict): dictionary to construct and config norm layer. Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). pre_anchor_topk (int): Number of boxes to be taken in each bag. bbox_thr (float): The threshold of the saturated linear function. It is usually the same as the IoU threshold used in NMS. gamma (float): Gamma parameter in focal loss. alpha (float): Alpha parameter in focal loss. """ # noqa: W605 def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=None, pre_anchor_topk=50, bbox_thr=0.6, gamma=2.0, alpha=0.5, **kwargs): super(FreeAnchorRetinaHead, self).__init__(num_classes, in_channels, stacked_convs, conv_cfg, norm_cfg, **kwargs) self.pre_anchor_topk = pre_anchor_topk self.bbox_thr = bbox_thr self.gamma = gamma self.alpha = alpha def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level. Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): each item is the ground truth boxes for each image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, _ = self.get_anchors( featmap_sizes, img_metas, device=device) anchors = [torch.cat(anchor) for anchor in anchor_list] # concatenate each level cls_scores = [ cls.permute(0, 2, 3, 1).reshape(cls.size(0), -1, self.cls_out_channels) for cls in cls_scores ] bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(bbox_pred.size(0), -1, 4) for bbox_pred in bbox_preds ] cls_scores = torch.cat(cls_scores, dim=1) bbox_preds = torch.cat(bbox_preds, dim=1) cls_prob = torch.sigmoid(cls_scores) box_prob = [] num_pos = 0 positive_losses = [] for _, (anchors_, gt_labels_, gt_bboxes_, cls_prob_, bbox_preds_) in enumerate( zip(anchors, gt_labels, gt_bboxes, cls_prob, bbox_preds)): with torch.no_grad(): if len(gt_bboxes_) == 0: image_box_prob = torch.zeros( anchors_.size(0), self.cls_out_channels).type_as(bbox_preds_) else: # box_localization: a_{j}^{loc}, shape: [j, 4] pred_boxes = self.bbox_coder.decode(anchors_, bbox_preds_) # object_box_iou: IoU_{ij}^{loc}, shape: [i, j] object_box_iou = bbox_overlaps(gt_bboxes_, pred_boxes) # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j] t1 = self.bbox_thr t2 = object_box_iou.max( dim=1, keepdim=True).values.clamp(min=t1 + 1e-12) object_box_prob = ((object_box_iou - t1) / (t2 - t1)).clamp( min=0, max=1) # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j] num_obj = gt_labels_.size(0) indices = torch.stack([ torch.arange(num_obj).type_as(gt_labels_), gt_labels_ ], dim=0) object_cls_box_prob = torch.sparse_coo_tensor( indices, object_box_prob) # image_box_iou: P{a_{j} \in A_{+}}, shape: [c, j] """ from "start" to "end" implement: image_box_iou = torch.sparse.max(object_cls_box_prob, dim=0).t() """ # start box_cls_prob = torch.sparse.sum( object_cls_box_prob, dim=0).to_dense() indices = torch.nonzero(box_cls_prob, as_tuple=False).t_() if indices.numel() == 0: image_box_prob = torch.zeros( anchors_.size(0), self.cls_out_channels).type_as(object_box_prob) else: nonzero_box_prob = torch.where( (gt_labels_.unsqueeze(dim=-1) == indices[0]), object_box_prob[:, indices[1]], torch.tensor([ 0 ]).type_as(object_box_prob)).max(dim=0).values # upmap to shape [j, c] image_box_prob = torch.sparse_coo_tensor( indices.flip([0]), nonzero_box_prob, size=(anchors_.size(0), self.cls_out_channels)).to_dense() # end box_prob.append(image_box_prob) # construct bags for objects match_quality_matrix = bbox_overlaps(gt_bboxes_, anchors_) _, matched = torch.topk( match_quality_matrix, self.pre_anchor_topk, dim=1, sorted=False) del match_quality_matrix # matched_cls_prob: P_{ij}^{cls} matched_cls_prob = torch.gather( cls_prob_[matched], 2, gt_labels_.view(-1, 1, 1).repeat(1, self.pre_anchor_topk, 1)).squeeze(2) # matched_box_prob: P_{ij}^{loc} matched_anchors = anchors_[matched] matched_object_targets = self.bbox_coder.encode( matched_anchors, gt_bboxes_.unsqueeze(dim=1).expand_as(matched_anchors)) loss_bbox = self.loss_bbox( bbox_preds_[matched], matched_object_targets, reduction_override='none').sum(-1) matched_box_prob = torch.exp(-loss_bbox) # positive_losses: {-log( Mean-max(P_{ij}^{cls} * 
P_{ij}^{loc}) )} num_pos += len(gt_bboxes_) positive_losses.append( self.positive_bag_loss(matched_cls_prob, matched_box_prob)) positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos) # box_prob: P{a_{j} \in A_{+}} box_prob = torch.stack(box_prob, dim=0) # negative_loss: # \sum_{j}{ FL((1 - P{a_{j} \in A_{+}}) * (1 - P_{j}^{bg})) } / n||B|| negative_loss = self.negative_bag_loss(cls_prob, box_prob).sum() / max( 1, num_pos * self.pre_anchor_topk) # avoid the absence of gradients in regression subnet # when no ground-truth in a batch if num_pos == 0: positive_loss = bbox_preds.sum() * 0 losses = { 'positive_bag_loss': positive_loss, 'negative_bag_loss': negative_loss } return losses def positive_bag_loss(self, matched_cls_prob, matched_box_prob): """Compute positive bag loss. :math:`-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )`. :math:`P_{ij}^{cls}`: matched_cls_prob, classification probability of matched samples. :math:`P_{ij}^{loc}`: matched_box_prob, box probability of matched samples. Args: matched_cls_prob (Tensor): Classification probability of matched samples in shape (num_gt, pre_anchor_topk). matched_box_prob (Tensor): BBox probability of matched samples, in shape (num_gt, pre_anchor_topk). Returns: Tensor: Positive bag loss in shape (num_gt,). """ # noqa: E501, W605 # bag_prob = Mean-max(matched_prob) matched_prob = matched_cls_prob * matched_box_prob weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None) weight /= weight.sum(dim=1).unsqueeze(dim=-1) bag_prob = (weight * matched_prob).sum(dim=1) # positive_bag_loss = -self.alpha * log(bag_prob) return self.alpha * F.binary_cross_entropy( bag_prob, torch.ones_like(bag_prob), reduction='none') def negative_bag_loss(self, cls_prob, box_prob): """Compute negative bag loss. :math:`FL((1 - P_{a_{j} \in A_{+}}) * (1 - P_{j}^{bg}))`. :math:`P_{a_{j} \in A_{+}}`: Box_probability of matched samples. :math:`P_{j}^{bg}`: Classification probability of negative samples. Args: cls_prob (Tensor): Classification probability, in shape (num_img, num_anchors, num_classes). box_prob (Tensor): Box probability, in shape (num_img, num_anchors, num_classes). Returns: Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes). """ # noqa: E501, W605 prob = cls_prob * (1 - box_prob) # There are some cases when neg_prob = 0. # This will cause the neg_prob.log() to be inf without clamp. prob = prob.clamp(min=EPS, max=1 - EPS) negative_bag_loss = prob**self.gamma * F.binary_cross_entropy( prob, torch.zeros_like(prob), reduction='none') return (1 - self.alpha) * negative_bag_loss ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/fsaf_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from mmcv.runner import force_fp32 from mmdet.core import (anchor_inside_flags, images_to_levels, multi_apply, unmap) from ..builder import HEADS from ..losses.accuracy import accuracy from ..losses.utils import weight_reduce_loss from .retina_head import RetinaHead @HEADS.register_module() class FSAFHead(RetinaHead): """Anchor-free head used in `FSAF `_. The head contains two subnetworks. The first classifies anchor boxes and the second regresses deltas for the anchors (num_anchors is 1 for anchor- free methods) Args: *args: Same as its base class in :class:`RetinaHead` score_threshold (float, optional): The score_threshold to calculate positive recall. 
            If given, prediction scores lower than this value are counted as
            incorrect predictions. Defaults to None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
        **kwargs: Same as its base class in :class:`RetinaHead`

    Example:
        >>> import torch
        >>> self = FSAFHead(11, 7)
        >>> x = torch.rand(1, 7, 32, 32)
        >>> cls_score, bbox_pred = self.forward_single(x)
        >>> # Each anchor predicts a score for each class except background
        >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
        >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
        >>> assert cls_per_anchor == self.num_classes
        >>> assert box_per_anchor == 4
    """

    def __init__(self, *args, score_threshold=None, init_cfg=None, **kwargs):
        # The positive bias in self.retina_reg conv is to prevent predicted \
        # bbox with 0 area
        if init_cfg is None:
            init_cfg = dict(
                type='Normal',
                layer='Conv2d',
                std=0.01,
                override=[
                    dict(
                        type='Normal',
                        name='retina_cls',
                        std=0.01,
                        bias_prob=0.01),
                    dict(
                        type='Normal', name='retina_reg', std=0.01, bias=0.25)
                ])
        super().__init__(*args, init_cfg=init_cfg, **kwargs)
        self.score_threshold = score_threshold

    def forward_single(self, x):
        """Forward feature map of a single scale level.

        Args:
            x (Tensor): Feature map of a single scale level.

        Returns:
            tuple (Tensor):
                cls_score (Tensor): Box scores for each scale level.
                    Has shape (N, num_points * num_classes, H, W).
                bbox_pred (Tensor): Box energies / deltas for each scale
                    level with shape (N, num_points * 4, H, W).
        """
        cls_score, bbox_pred = super().forward_single(x)
        # relu: TBLR encoder only accepts positive bbox_pred
        return cls_score, self.relu(bbox_pred)

    def _get_targets_single(self,
                            flat_anchors,
                            valid_flags,
                            gt_bboxes,
                            gt_bboxes_ignore,
                            gt_labels,
                            img_meta,
                            label_channels=1,
                            unmap_outputs=True):
        """Compute regression and classification targets for anchors in a
        single image.

        Most of the code is the same as in the base class
        :obj:`AnchorHead`, except that it also collects and returns the
        matched gt index in the image (from 0 to num_gt-1). If the anchor
        bbox is not matched to any gt, the corresponding value in
        pos_gt_inds is -1.
        """
        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
                                           img_meta['img_shape'][:2],
                                           self.train_cfg.allowed_border)
        if not inside_flags.any():
            return (None, ) * 7
        # Assign gt and sample anchors
        anchors = flat_anchors[inside_flags.type(torch.bool), :]
        assign_result = self.assigner.assign(
            anchors, gt_bboxes, gt_bboxes_ignore,
            None if self.sampling else gt_labels)

        sampling_result = self.sampler.sample(assign_result, anchors,
                                              gt_bboxes)

        num_valid_anchors = anchors.shape[0]
        bbox_targets = torch.zeros_like(anchors)
        bbox_weights = torch.zeros_like(anchors)
        labels = anchors.new_full((num_valid_anchors, ),
                                  self.num_classes,
                                  dtype=torch.long)
        label_weights = anchors.new_zeros((num_valid_anchors, label_channels),
                                          dtype=torch.float)
        pos_gt_inds = anchors.new_full((num_valid_anchors, ),
                                       -1,
                                       dtype=torch.long)
        pos_inds = sampling_result.pos_inds
        neg_inds = sampling_result.neg_inds

        if len(pos_inds) > 0:
            if not self.reg_decoded_bbox:
                pos_bbox_targets = self.bbox_coder.encode(
                    sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
            else:
                # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
                # is applied directly on the decoded bounding boxes, both
                # the predicted boxes and regression targets should be in
                # absolute coordinate format.
                pos_bbox_targets = sampling_result.pos_gt_bboxes
            bbox_targets[pos_inds, :] = pos_bbox_targets
            bbox_weights[pos_inds, :] = 1.0
            # The assigned gt_index for each anchor.
(0-based) pos_gt_inds[pos_inds] = sampling_result.pos_assigned_gt_inds if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # shadowed_labels is a tensor composed of tuples # (anchor_inds, class_label) that indicate those anchors lying in the # outer region of a gt or overlapped by another gt with a smaller # area. # # Therefore, only the shadowed labels are ignored for loss calculation. # the key `shadowed_labels` is defined in :obj:`CenterRegionAssigner` shadowed_labels = assign_result.get_extra_property('shadowed_labels') if shadowed_labels is not None and shadowed_labels.numel(): if len(shadowed_labels.shape) == 2: idx_, label_ = shadowed_labels[:, 0], shadowed_labels[:, 1] assert (labels[idx_] != label_).all(), \ 'One label cannot be both positive and ignored' label_weights[idx_, label_] = 0 else: label_weights[shadowed_labels] = 0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) labels = unmap(labels, num_total_anchors, inside_flags) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) pos_gt_inds = unmap( pos_gt_inds, num_total_anchors, inside_flags, fill=-1) return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds, sampling_result, pos_gt_inds) @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute loss of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_points * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_points * 4, H, W). gt_bboxes (list[Tensor]): each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ for i in range(len(bbox_preds)): # loop over fpn level # avoid 0 area of the predicted bbox bbox_preds[i] = bbox_preds[i].clamp(min=1e-4) # TODO: It may directly use the base-class loss function. 
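        # A worked sketch of the online feature selection performed below
        # (illustrative values, not executed): for every ground-truth box,
        # the per-level (cls + reg) losses are compared and only the argmin
        # level keeps its positive anchors.
        #
        #     >>> import torch
        #     >>> loss_levels = torch.tensor([[0.9, 0.2],
        #     ...                             [0.4, 0.5],
        #     ...                             [0.7, 0.8]])  # (levels, gts)
        #     >>> loss_levels.min(dim=0).indices
        #     tensor([1, 0])
        #
        # i.e. gt 0 is trained at level 1 and gt 1 at level 0, matching the
        # `loss_levels.min(dim=0)` call in this method.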
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels batch_size = len(gt_bboxes) device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg, pos_assigned_gt_inds_list) = cls_reg_targets num_gts = np.array(list(map(len, gt_labels))) num_total_samples = ( num_total_pos + num_total_neg if self.sampling else num_total_pos) # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor concat_anchor_list = [] for i in range(len(anchor_list)): concat_anchor_list.append(torch.cat(anchor_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) losses_cls, losses_bbox = multi_apply( self.loss_single, cls_scores, bbox_preds, all_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_samples=num_total_samples) # `pos_assigned_gt_inds_list` (length: fpn_levels) stores the assigned # gt index of each anchor bbox in each fpn level. cum_num_gts = list(np.cumsum(num_gts)) # length of batch_size for i, assign in enumerate(pos_assigned_gt_inds_list): # loop over fpn levels for j in range(1, batch_size): # loop over batch size # Convert gt indices in each img to those in the batch assign[j][assign[j] >= 0] += int(cum_num_gts[j - 1]) pos_assigned_gt_inds_list[i] = assign.flatten() labels_list[i] = labels_list[i].flatten() num_gts = sum(map(len, gt_labels)) # total number of gt in the batch # The unique label index of each gt in the batch label_sequence = torch.arange(num_gts, device=device) # Collect the average loss of each gt in each level with torch.no_grad(): loss_levels, = multi_apply( self.collect_loss_level_single, losses_cls, losses_bbox, pos_assigned_gt_inds_list, labels_seq=label_sequence) # Shape: (fpn_levels, num_gts). Loss of each gt at each fpn level loss_levels = torch.stack(loss_levels, dim=0) # Locate the best fpn level for loss back-propagation if loss_levels.numel() == 0: # zero gt argmin = loss_levels.new_empty((num_gts, ), dtype=torch.long) else: _, argmin = loss_levels.min(dim=0) # Reweight the loss of each (anchor, label) pair, so that only those # at the best gt level are back-propagated. losses_cls, losses_bbox, pos_inds = multi_apply( self.reweight_loss_single, losses_cls, losses_bbox, pos_assigned_gt_inds_list, labels_list, list(range(len(losses_cls))), min_levels=argmin) num_pos = torch.cat(pos_inds, 0).sum().float() pos_recall = self.calculate_pos_recall(cls_scores, labels_list, pos_inds) if num_pos == 0: # No gt avg_factor = num_pos + float(num_total_neg) else: avg_factor = num_pos for i in range(len(losses_cls)): losses_cls[i] /= avg_factor losses_bbox[i] /= avg_factor return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, num_pos=num_pos / batch_size, pos_recall=pos_recall) def calculate_pos_recall(self, cls_scores, labels_list, pos_inds): """Calculate positive recall with score threshold. Args: cls_scores (list[Tensor]): Classification scores at all fpn levels. 
Each tensor is in shape (N, num_classes * num_anchors, H, W) labels_list (list[Tensor]): The label that each anchor is assigned to. Shape (N * H * W * num_anchors, ) pos_inds (list[Tensor]): List of bool tensors indicating whether the anchor is assigned to a positive label. Shape (N * H * W * num_anchors, ) Returns: Tensor: A single float number indicating the positive recall. """ with torch.no_grad(): num_class = self.num_classes scores = [ cls.permute(0, 2, 3, 1).reshape(-1, num_class)[pos] for cls, pos in zip(cls_scores, pos_inds) ] labels = [ label.reshape(-1)[pos] for label, pos in zip(labels_list, pos_inds) ] scores = torch.cat(scores, dim=0) labels = torch.cat(labels, dim=0) if self.use_sigmoid_cls: scores = scores.sigmoid() else: scores = scores.softmax(dim=1) return accuracy(scores, labels, thresh=self.score_threshold) def collect_loss_level_single(self, cls_loss, reg_loss, assigned_gt_inds, labels_seq): """Get the average loss in each FPN level w.r.t. each gt label. Args: cls_loss (Tensor): Classification loss of each feature map pixel, shape (num_anchor, num_class) reg_loss (Tensor): Regression loss of each feature map pixel, shape (num_anchor, 4) assigned_gt_inds (Tensor): It indicates which gt the prior is assigned to (0-based, -1: no assignment). shape (num_anchor), labels_seq: The rank of labels. shape (num_gt) Returns: shape: (num_gt), average loss of each gt in this level """ if len(reg_loss.shape) == 2: # iou loss has shape (num_prior, 4) reg_loss = reg_loss.sum(dim=-1) # sum loss in tblr dims if len(cls_loss.shape) == 2: cls_loss = cls_loss.sum(dim=-1) # sum loss in class dims loss = cls_loss + reg_loss assert loss.size(0) == assigned_gt_inds.size(0) # Default loss value is 1e6 for a layer where no anchor is positive # to ensure it will not be chosen to back-propagate gradient losses_ = loss.new_full(labels_seq.shape, 1e6) for i, l in enumerate(labels_seq): match = assigned_gt_inds == l if match.any(): losses_[i] = loss[match].mean() return losses_, def reweight_loss_single(self, cls_loss, reg_loss, assigned_gt_inds, labels, level, min_levels): """Reweight loss values at each level. Reassign loss values at each level by masking those where the pre-calculated loss is too large. Then return the reduced losses. Args: cls_loss (Tensor): Element-wise classification loss. Shape: (num_anchors, num_classes) reg_loss (Tensor): Element-wise regression loss. Shape: (num_anchors, 4) assigned_gt_inds (Tensor): The gt indices that each anchor bbox is assigned to. -1 denotes a negative anchor, otherwise it is the gt index (0-based). Shape: (num_anchors, ), labels (Tensor): Label assigned to anchors. Shape: (num_anchors, ). level (int): The current level index in the pyramid (0-4 for RetinaNet) min_levels (Tensor): The best-matching level for each gt. Shape: (num_gts, ), Returns: tuple: - cls_loss: Reduced corrected classification loss. Scalar. - reg_loss: Reduced corrected regression loss. Scalar. - pos_flags (Tensor): Corrected bool tensor indicating the final positive anchors. Shape: (num_anchors, ). 
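        In short (an illustrative summary of the logic below): positive
        anchors whose assigned gt selected a different best level get their
        positive flag, localization weight, and own-class logit weight
        zeroed, so each gt is back-propagated at exactly one pyramid level.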
""" loc_weight = torch.ones_like(reg_loss) cls_weight = torch.ones_like(cls_loss) pos_flags = assigned_gt_inds >= 0 # positive pixel flag pos_indices = torch.nonzero(pos_flags, as_tuple=False).flatten() if pos_flags.any(): # pos pixels exist pos_assigned_gt_inds = assigned_gt_inds[pos_flags] zeroing_indices = (min_levels[pos_assigned_gt_inds] != level) neg_indices = pos_indices[zeroing_indices] if neg_indices.numel(): pos_flags[neg_indices] = 0 loc_weight[neg_indices] = 0 # Only the weight corresponding to the label is # zeroed out if not selected zeroing_labels = labels[neg_indices] assert (zeroing_labels >= 0).all() cls_weight[neg_indices, zeroing_labels] = 0 # Weighted loss for both cls and reg loss cls_loss = weight_reduce_loss(cls_loss, cls_weight, reduction='sum') reg_loss = weight_reduce_loss(reg_loss, loc_weight, reduction='sum') return cls_loss, reg_loss, pos_flags ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/ga_retina_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import MaskedConv2d from ..builder import HEADS from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead @HEADS.register_module() class GARetinaHead(GuidedAnchorHead): """Guided-Anchor-based RetinaNet head.""" def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=None, init_cfg=None, **kwargs): if init_cfg is None: init_cfg = dict( type='Normal', layer='Conv2d', std=0.01, override=[ dict( type='Normal', name='conv_loc', std=0.01, bias_prob=0.01), dict( type='Normal', name='retina_cls', std=0.01, bias_prob=0.01) ]) self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg super(GARetinaHead, self).__init__( num_classes, in_channels, init_cfg=init_cfg, **kwargs) def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1) self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2, 1) self.feature_adaption_cls = FeatureAdaption( self.feat_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups) self.feature_adaption_reg = FeatureAdaption( self.feat_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups) self.retina_cls = MaskedConv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.retina_reg = MaskedConv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) def forward_single(self, x): """Forward feature map of a single scale level.""" cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) loc_pred = self.conv_loc(cls_feat) shape_pred = self.conv_shape(reg_feat) cls_feat = self.feature_adaption_cls(cls_feat, shape_pred) reg_feat = self.feature_adaption_reg(reg_feat, shape_pred) if not self.training: mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr else: mask = None cls_score = 
self.retina_cls(cls_feat, mask) bbox_pred = self.retina_reg(reg_feat, mask) return cls_score, bbox_pred, shape_pred, loc_pred ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/ga_rpn_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import copy import warnings import torch import torch.nn as nn import torch.nn.functional as F from mmcv import ConfigDict from mmcv.ops import nms from ..builder import HEADS from .guided_anchor_head import GuidedAnchorHead @HEADS.register_module() class GARPNHead(GuidedAnchorHead): """Guided-Anchor-based RPN head.""" def __init__(self, in_channels, init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='conv_loc', std=0.01, bias_prob=0.01)), **kwargs): super(GARPNHead, self).__init__( 1, in_channels, init_cfg=init_cfg, **kwargs) def _init_layers(self): """Initialize layers of the head.""" self.rpn_conv = nn.Conv2d( self.in_channels, self.feat_channels, 3, padding=1) super(GARPNHead, self)._init_layers() def forward_single(self, x): """Forward feature of a single scale level.""" x = self.rpn_conv(x) x = F.relu(x, inplace=True) (cls_score, bbox_pred, shape_pred, loc_pred) = super(GARPNHead, self).forward_single(x) return cls_score, bbox_pred, shape_pred, loc_pred def loss(self, cls_scores, bbox_preds, shape_preds, loc_preds, gt_bboxes, img_metas, gt_bboxes_ignore=None): losses = super(GARPNHead, self).loss( cls_scores, bbox_preds, shape_preds, loc_preds, gt_bboxes, None, img_metas, gt_bboxes_ignore=gt_bboxes_ignore) return dict( loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox'], loss_anchor_shape=losses['loss_shape'], loss_anchor_loc=losses['loss_loc']) def _get_bboxes_single(self, cls_scores, bbox_preds, mlvl_anchors, mlvl_masks, img_shape, scale_factor, cfg, rescale=False): cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) # deprecate arguments warning if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: warnings.warn( 'In rpn_proposal or test_cfg, ' 'nms_thr has been moved to a dict named nms as ' 'iou_threshold, max_num has been renamed as max_per_img, ' 'name of original arguments and the way to specify ' 'iou_threshold of NMS will be deprecated.') if 'nms' not in cfg: cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) if 'max_num' in cfg: if 'max_per_img' in cfg: assert cfg.max_num == cfg.max_per_img, f'You ' \ f'set max_num and max_per_img at the same time, ' \ f'but get {cfg.max_num} ' \ f'and {cfg.max_per_img} respectively' \ 'Please delete max_num which will be deprecated.' else: cfg.max_per_img = cfg.max_num if 'nms_thr' in cfg: assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \ f'iou_threshold in nms and ' \ f'nms_thr at the same time, but get ' \ f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \ f' respectively. Please delete the ' \ f'nms_thr which will be deprecated.' assert cfg.nms.get('type', 'nms') == 'nms', 'GARPNHead only support ' \ 'naive nms.' mlvl_proposals = [] for idx in range(len(cls_scores)): rpn_cls_score = cls_scores[idx] rpn_bbox_pred = bbox_preds[idx] anchors = mlvl_anchors[idx] mask = mlvl_masks[idx] assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] # if no location is kept, end. 
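            # Illustrative sketch (not executed) of the location filtering
            # referred to above: `mask` marks positions whose predicted
            # objectness passed `loc_filter_thr`, e.g. with a 2x2 feature
            # map and the default threshold of 0.01
            #
            #     >>> import torch
            #     >>> loc_pred = torch.tensor([[0.90, 0.001], [0.20, 0.005]])
            #     >>> (loc_pred >= 0.01).sum()
            #     tensor(2)
            #
            # a sum of zero would mean every position was filtered out, so
            # the whole level is skipped: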
if mask.sum() == 0: continue rpn_cls_score = rpn_cls_score.permute(1, 2, 0) if self.use_sigmoid_cls: rpn_cls_score = rpn_cls_score.reshape(-1) scores = rpn_cls_score.sigmoid() else: rpn_cls_score = rpn_cls_score.reshape(-1, 2) # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class scores = rpn_cls_score.softmax(dim=1)[:, :-1] # filter scores, bbox_pred w.r.t. mask. # anchors are filtered in get_anchors() beforehand. scores = scores[mask] rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4)[mask, :] if scores.dim() == 0: rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0) anchors = anchors.unsqueeze(0) scores = scores.unsqueeze(0) # filter anchors, bbox_pred, scores w.r.t. scores if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre: _, topk_inds = scores.topk(cfg.nms_pre) rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] anchors = anchors[topk_inds, :] scores = scores[topk_inds] # get proposals w.r.t. anchors and rpn_bbox_pred proposals = self.bbox_coder.decode( anchors, rpn_bbox_pred, max_shape=img_shape) # filter out too small bboxes if cfg.min_bbox_size >= 0: w = proposals[:, 2] - proposals[:, 0] h = proposals[:, 3] - proposals[:, 1] valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size) if not valid_mask.all(): proposals = proposals[valid_mask] scores = scores[valid_mask] # NMS in current level proposals, _ = nms(proposals, scores, cfg.nms.iou_threshold) proposals = proposals[:cfg.nms_post, :] mlvl_proposals.append(proposals) proposals = torch.cat(mlvl_proposals, 0) if cfg.get('nms_across_levels', False): # NMS across multi levels proposals, _ = nms(proposals[:, :4], proposals[:, -1], cfg.nms.iou_threshold) proposals = proposals[:cfg.max_per_img, :] else: scores = proposals[:, 4] num = min(cfg.max_per_img, proposals.shape[0]) _, topk_inds = scores.topk(num) proposals = proposals[topk_inds, :] return proposals ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/gfl_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, Scale from mmcv.runner import force_fp32 from mmdet.core import (anchor_inside_flags, bbox_overlaps, build_assigner, build_sampler, images_to_levels, multi_apply, reduce_mean, unmap) from mmdet.core.utils import filter_scores_and_topk from ..builder import HEADS, build_loss from .anchor_head import AnchorHead class Integral(nn.Module): """A fixed layer for calculating integral result from distribution. This layer calculates the target location by :math: `sum{P(y_i) * y_i}`, P(y_i) denotes the softmax vector that represents the discrete distribution y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max} Args: reg_max (int): The maximal value of the discrete set. Default: 16. You may want to reset it according to your new dataset or related settings. """ def __init__(self, reg_max=16): super(Integral, self).__init__() self.reg_max = reg_max self.register_buffer('project', torch.linspace(0, self.reg_max, self.reg_max + 1)) def forward(self, x): """Forward feature from the regression head to get integral result of bounding box location. Args: x (Tensor): Features of the regression head, shape (N, 4*(n+1)), n is self.reg_max. Returns: x (Tensor): Integral result of box locations, i.e., distance offsets from the box center in four directions, shape (N, 4). 
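        Example (an illustrative sketch; with ``reg_max=2`` the discrete
        set is {0, 1, 2}, and uniform logits give an expectation of 1.0 in
        each of the four directions):

            >>> layer = Integral(reg_max=2)
            >>> layer(torch.zeros(1, 12))
            tensor([[1., 1., 1., 1.]])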
""" x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1) x = F.linear(x, self.project.type_as(x)).reshape(-1, 4) return x @HEADS.register_module() class GFLHead(AnchorHead): """Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection. GFL head structure is similar with ATSS, however GFL uses 1) joint representation for classification and localization quality, and 2) flexible General distribution for bounding box locations, which are supervised by Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively https://arxiv.org/abs/2006.04388 Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of conv layers in cls and reg tower. Default: 4. conv_cfg (dict): dictionary to construct and config conv layer. Default: None. norm_cfg (dict): dictionary to construct and config norm layer. Default: dict(type='GN', num_groups=32, requires_grad=True). loss_qfl (dict): Config of Quality Focal Loss (QFL). bbox_coder (dict): Config of bbox coder. Defaults 'DistancePointBBoxCoder'. reg_max (int): Max value of integral set :math: `{0, ..., reg_max}` in QFL setting. Default: 16. init_cfg (dict or list[dict], optional): Initialization config dict. Example: >>> self = GFLHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_quality_score, bbox_pred = self.forward(feats) >>> assert len(cls_quality_score) == len(self.scales) """ def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25), bbox_coder=dict(type='DistancePointBBoxCoder'), reg_max=16, init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='gfl_cls', std=0.01, bias_prob=0.01)), **kwargs): self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.reg_max = reg_max super(GFLHead, self).__init__( num_classes, in_channels, bbox_coder=bbox_coder, init_cfg=init_cfg, **kwargs) self.sampling = False if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # SSD sampling=False so use PseudoSampler sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.integral = Integral(self.reg_max) self.loss_dfl = build_loss(loss_dfl) def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) assert self.num_anchors == 1, 'anchor free version' self.gfl_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.gfl_reg = nn.Conv2d( self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1) self.scales = nn.ModuleList( [Scale(1.0) for _ in self.prior_generator.strides]) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. 
Returns: tuple: Usually a tuple of classification scores and bbox prediction cls_scores (list[Tensor]): Classification and quality (IoU) joint scores for all scale levels, each is a 4D-tensor, the channel number is num_classes. bbox_preds (list[Tensor]): Box distribution logits for all scale levels, each is a 4D-tensor, the channel number is 4*(n+1), n is max value of integral set. """ return multi_apply(self.forward_single, feats, self.scales) def forward_single(self, x, scale): """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. Returns: tuple: cls_score (Tensor): Cls and quality joint scores for a single scale level the channel number is num_classes. bbox_pred (Tensor): Box distribution logits for a single scale level, the channel number is 4*(n+1), n is max value of integral set. """ cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.gfl_cls(cls_feat) bbox_pred = scale(self.gfl_reg(reg_feat)).float() return cls_score, bbox_pred def anchor_center(self, anchors): """Get anchor centers from anchors. Args: anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format. Returns: Tensor: Anchor centers with shape (N, 2), "xy" format. """ anchors_cx = (anchors[..., 2] + anchors[..., 0]) / 2 anchors_cy = (anchors[..., 3] + anchors[..., 1]) / 2 return torch.stack([anchors_cx, anchors_cy], dim=-1) def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights, bbox_targets, stride, num_total_samples): """Compute loss of a single scale level. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). cls_score (Tensor): Cls and quality joint scores for each scale level has shape (N, num_classes, H, W). bbox_pred (Tensor): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). stride (tuple): Stride in this scale level. num_total_samples (int): Number of positive samples that is reduced over all GPUs. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert stride[0] == stride[1], 'h stride is not equal to w stride!' 
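        # Sketch of the Quality Focal Loss target built below (illustrative,
        # not executed): positives are supervised with the IoU between their
        # decoded box and the gt box instead of a hard one-hot label, so the
        # classification branch learns a joint cls-quality score.
        #
        #     >>> import torch
        #     >>> from mmdet.core import bbox_overlaps
        #     >>> pred = torch.tensor([[0., 0., 10., 10.]])
        #     >>> gt = torch.tensor([[0., 0., 10., 20.]])
        #     >>> bbox_overlaps(pred, gt, is_aligned=True)
        #     tensor([0.5000])
        #
        # that 0.5 is what gets written into `score[pos_inds]` further down.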
anchors = anchors.reshape(-1, 4) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4 * (self.reg_max + 1)) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1) score = label_weights.new_zeros(labels.shape) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0] weight_targets = cls_score.detach().sigmoid() weight_targets = weight_targets.max(dim=1)[0][pos_inds] pos_bbox_pred_corners = self.integral(pos_bbox_pred) pos_decode_bbox_pred = self.bbox_coder.decode( pos_anchor_centers, pos_bbox_pred_corners) pos_decode_bbox_targets = pos_bbox_targets / stride[0] score[pos_inds] = bbox_overlaps( pos_decode_bbox_pred.detach(), pos_decode_bbox_targets, is_aligned=True) pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1) target_corners = self.bbox_coder.encode(pos_anchor_centers, pos_decode_bbox_targets, self.reg_max).reshape(-1) # regression loss loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_decode_bbox_targets, weight=weight_targets, avg_factor=1.0) # dfl loss loss_dfl = self.loss_dfl( pred_corners, target_corners, weight=weight_targets[:, None].expand(-1, 4).reshape(-1), avg_factor=4.0) else: loss_bbox = bbox_pred.sum() * 0 loss_dfl = bbox_pred.sum() * 0 weight_targets = bbox_pred.new_tensor(0) # cls (qfl) loss loss_cls = self.loss_cls( cls_score, (labels, score), weight=label_weights, avg_factor=num_total_samples) return loss_cls, loss_bbox, loss_dfl, weight_targets.sum() @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Cls and quality scores for each scale level has shape (N, num_classes, H, W). bbox_preds (list[Tensor]): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. 
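        Note (illustrative summary): the bbox and DFL losses returned below
        are re-normalized by ``avg_factor``, the IoU-style quality weights
        summed over positives and averaged across GPUs, rather than by a
        plain count of positive anchors.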
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = reduce_mean( torch.tensor(num_total_pos, dtype=torch.float, device=device)).item() num_total_samples = max(num_total_samples, 1.0) losses_cls, losses_bbox, losses_dfl,\ avg_factor = multi_apply( self.loss_single, anchor_list, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_targets_list, self.prior_generator.strides, num_total_samples=num_total_samples) avg_factor = sum(avg_factor) avg_factor = reduce_mean(avg_factor).clamp_(min=1).item() losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox)) losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl)) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl) def _get_bboxes_single(self, cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors, img_meta, cfg, rescale=False, with_nms=True, **kwargs): """Transform outputs of a single image into bbox predictions. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image. GFL head does not need this value. mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid, has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually with_nms is False is used for aug test. If with_nms is True, then return the following format - det_bboxes (Tensor): Predicted bboxes with shape \ [num_bboxes, 5], where the first 4 columns are bounding \ box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ column are scores between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding \ box with shape [num_bboxes]. 
""" cfg = self.test_cfg if cfg is None else cfg img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_labels = [] for level_idx, (cls_score, bbox_pred, stride, priors) in enumerate( zip(cls_score_list, bbox_pred_list, self.prior_generator.strides, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] assert stride[0] == stride[1] bbox_pred = bbox_pred.permute(1, 2, 0) bbox_pred = self.integral(bbox_pred) * stride[0] scores = cls_score.permute(1, 2, 0).reshape( -1, self.cls_out_channels).sigmoid() # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict(bbox_pred=bbox_pred, priors=priors)) scores, labels, _, filtered_results = results bbox_pred = filtered_results['bbox_pred'] priors = filtered_results['priors'] bboxes = self.bbox_coder.decode( self.anchor_center(priors), bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_labels.append(labels) return self._bbox_post_process( mlvl_scores, mlvl_labels, mlvl_bboxes, img_meta['scale_factor'], cfg, rescale=rescale, with_nms=with_nms) def get_targets(self, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True): """Get targets for GFL head. This method is almost the same as `AnchorHead.get_targets()`. Besides returning the targets as the parent method does, it also returns the anchors as the first element of the returned tuple. """ num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] num_level_anchors_list = [num_level_anchors] * num_imgs # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list[i] = torch.cat(anchor_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( self._get_target_single, anchor_list, valid_flag_list, num_level_anchors_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. 
multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors) labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) def _get_target_single(self, flat_anchors, valid_flags, num_level_anchors, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True): """Compute regression, classification targets for anchors in a single image. Args: flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors, 4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors,). num_level_anchors Tensor): Number of anchors of each scale level. gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). img_meta (dict): Meta info of the image. label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: N is the number of total anchors in the image. anchors (Tensor): All anchors in the image with shape (N, 4). labels (Tensor): Labels of all anchors in the image with shape (N,). label_weights (Tensor): Label weights of all anchor in the image with shape (N,). bbox_targets (Tensor): BBox targets of all anchors in the image with shape (N, 4). bbox_weights (Tensor): BBox weights of all anchors in the image with shape (N, 4). pos_inds (Tensor): Indices of positive anchor with shape (num_pos,). neg_inds (Tensor): Indices of negative anchor with shape (num_neg,). 
""" inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) if not inside_flags.any(): return (None, ) * 7 # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] num_level_anchors_inside = self.get_num_level_anchors_inside( num_level_anchors, inside_flags) assign_result = self.assigner.assign(anchors, num_level_anchors_inside, gt_bboxes, gt_bboxes_ignore, gt_labels) sampling_result = self.sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) anchors = unmap(anchors, num_total_anchors, inside_flags) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (anchors, labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) def get_num_level_anchors_inside(self, num_level_anchors, inside_flags): split_inside_flags = torch.split(inside_flags, num_level_anchors) num_level_anchors_inside = [ int(flags.sum()) for flags in split_inside_flags ] return num_level_anchors_inside ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/guided_anchor_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings import torch import torch.nn as nn from mmcv.ops import DeformConv2d, MaskedConv2d from mmcv.runner import BaseModule, force_fp32 from mmdet.core import (anchor_inside_flags, build_assigner, build_bbox_coder, build_prior_generator, build_sampler, calc_region, images_to_levels, multi_apply, multiclass_nms, unmap) from ..builder import HEADS, build_loss from .anchor_head import AnchorHead class FeatureAdaption(BaseModule): """Feature Adaption Module. Feature Adaption Module is implemented based on DCN v1. It uses anchor shape prediction rather than feature map to predict offsets of deform conv layer. Args: in_channels (int): Number of channels in the input feature map. out_channels (int): Number of channels in the output feature map. kernel_size (int): Deformable conv kernel size. deform_groups (int): Deformable conv group size. init_cfg (dict or list[dict], optional): Initialization config dict. 
""" def __init__(self, in_channels, out_channels, kernel_size=3, deform_groups=4, init_cfg=dict( type='Normal', layer='Conv2d', std=0.1, override=dict( type='Normal', name='conv_adaption', std=0.01))): super(FeatureAdaption, self).__init__(init_cfg) offset_channels = kernel_size * kernel_size * 2 self.conv_offset = nn.Conv2d( 2, deform_groups * offset_channels, 1, bias=False) self.conv_adaption = DeformConv2d( in_channels, out_channels, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, deform_groups=deform_groups) self.relu = nn.ReLU(inplace=True) def forward(self, x, shape): offset = self.conv_offset(shape.detach()) x = self.relu(self.conv_adaption(x, offset)) return x @HEADS.register_module() class GuidedAnchorHead(AnchorHead): """Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.). This GuidedAnchorHead will predict high-quality feature guided anchors and locations where anchors will be kept in inference. There are mainly 3 categories of bounding-boxes. - Sampled 9 pairs for target assignment. (approxes) - The square boxes where the predicted anchors are based on. (squares) - Guided anchors. Please refer to https://arxiv.org/abs/1901.03278 for more details. Args: num_classes (int): Number of classes. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. approx_anchor_generator (dict): Config dict for approx generator square_anchor_generator (dict): Config dict for square generator anchor_coder (dict): Config dict for anchor coder bbox_coder (dict): Config dict for bbox coder reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Default False. It should be `True` when using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. deform_groups: (int): Group number of DCN in FeatureAdaption module. loc_filter_thr (float): Threshold to filter out unconcerned regions. loss_loc (dict): Config of location loss. loss_shape (dict): Config of anchor shape loss. loss_cls (dict): Config of classification loss. loss_bbox (dict): Config of bbox regression loss. init_cfg (dict or list[dict], optional): Initialization config dict. 
""" def __init__( self, num_classes, in_channels, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=8, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[8], strides=[4, 8, 16, 32, 64]), anchor_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0] ), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0] ), reg_decoded_bbox=False, deform_groups=4, loc_filter_thr=0.01, train_cfg=None, test_cfg=None, loss_loc=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0), init_cfg=dict(type='Normal', layer='Conv2d', std=0.01, override=dict(type='Normal', name='conv_loc', std=0.01, bias_prob=0.01))): # yapf: disable super(AnchorHead, self).__init__(init_cfg) self.in_channels = in_channels self.num_classes = num_classes self.feat_channels = feat_channels self.deform_groups = deform_groups self.loc_filter_thr = loc_filter_thr # build approx_anchor_generator and square_anchor_generator assert (approx_anchor_generator['octave_base_scale'] == square_anchor_generator['scales'][0]) assert (approx_anchor_generator['strides'] == square_anchor_generator['strides']) self.approx_anchor_generator = build_prior_generator( approx_anchor_generator) self.square_anchor_generator = build_prior_generator( square_anchor_generator) self.approxs_per_octave = self.approx_anchor_generator \ .num_base_priors[0] self.reg_decoded_bbox = reg_decoded_bbox # one anchor per location self.num_base_priors = self.square_anchor_generator.num_base_priors[0] self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) self.loc_focal_loss = loss_loc['type'] in ['FocalLoss'] self.sampling = loss_cls['type'] not in ['FocalLoss'] self.ga_sampling = train_cfg is not None and hasattr( train_cfg, 'ga_sampler') if self.use_sigmoid_cls: self.cls_out_channels = self.num_classes else: self.cls_out_channels = self.num_classes + 1 # build bbox_coder self.anchor_coder = build_bbox_coder(anchor_coder) self.bbox_coder = build_bbox_coder(bbox_coder) # build losses self.loss_loc = build_loss(loss_loc) self.loss_shape = build_loss(loss_shape) self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # use PseudoSampler when sampling is False if self.sampling and hasattr(self.train_cfg, 'sampler'): sampler_cfg = self.train_cfg.sampler else: sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.ga_assigner = build_assigner(self.train_cfg.ga_assigner) if self.ga_sampling: ga_sampler_cfg = self.train_cfg.ga_sampler else: ga_sampler_cfg = dict(type='PseudoSampler') self.ga_sampler = build_sampler(ga_sampler_cfg, context=self) self.fp16_enabled = False self._init_layers() @property def num_anchors(self): warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' 'please use "num_base_priors" instead') return self.square_anchor_generator.num_base_priors[0] def _init_layers(self): self.relu = nn.ReLU(inplace=True) self.conv_loc = nn.Conv2d(self.in_channels, 1, 1) self.conv_shape = 
nn.Conv2d(self.in_channels, self.num_base_priors * 2, 1) self.feature_adaption = FeatureAdaption( self.in_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups) self.conv_cls = MaskedConv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 1) self.conv_reg = MaskedConv2d(self.feat_channels, self.num_base_priors * 4, 1) def forward_single(self, x): loc_pred = self.conv_loc(x) shape_pred = self.conv_shape(x) x = self.feature_adaption(x, shape_pred) # masked conv is only used during inference for speed-up if not self.training: mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr else: mask = None cls_score = self.conv_cls(x, mask) bbox_pred = self.conv_reg(x, mask) return cls_score, bbox_pred, shape_pred, loc_pred def forward(self, feats): return multi_apply(self.forward_single, feats) def get_sampled_approxs(self, featmap_sizes, img_metas, device='cuda'): """Get sampled approxs and inside flags according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. img_metas (list[dict]): Image meta info. device (torch.device | str): device for returned tensors Returns: tuple: approxes of each image, inside flags of each image """ num_imgs = len(img_metas) # since feature map sizes of all images are the same, we only compute # approxes for one time multi_level_approxs = self.approx_anchor_generator.grid_priors( featmap_sizes, device=device) approxs_list = [multi_level_approxs for _ in range(num_imgs)] # for each image, we compute inside flags of multi level approxes inside_flag_list = [] for img_id, img_meta in enumerate(img_metas): multi_level_flags = [] multi_level_approxs = approxs_list[img_id] # obtain valid flags for each approx first multi_level_approx_flags = self.approx_anchor_generator \ .valid_flags(featmap_sizes, img_meta['pad_shape'], device=device) for i, flags in enumerate(multi_level_approx_flags): approxs = multi_level_approxs[i] inside_flags_list = [] for i in range(self.approxs_per_octave): split_valid_flags = flags[i::self.approxs_per_octave] split_approxs = approxs[i::self.approxs_per_octave, :] inside_flags = anchor_inside_flags( split_approxs, split_valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) inside_flags_list.append(inside_flags) # inside_flag for a position is true if any anchor in this # position is true inside_flags = ( torch.stack(inside_flags_list, 0).sum(dim=0) > 0) multi_level_flags.append(inside_flags) inside_flag_list.append(multi_level_flags) return approxs_list, inside_flag_list def get_anchors(self, featmap_sizes, shape_preds, loc_preds, img_metas, use_loc_filter=False, device='cuda'): """Get squares according to feature map sizes and guided anchors. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. shape_preds (list[tensor]): Multi-level shape predictions. loc_preds (list[tensor]): Multi-level location predictions. img_metas (list[dict]): Image meta info. use_loc_filter (bool): Use loc filter or not. 
device (torch.device | str): device for returned tensors Returns: tuple: square approxs of each image, guided anchors of each image, loc masks of each image """ num_imgs = len(img_metas) num_levels = len(featmap_sizes) # since feature map sizes of all images are the same, we only compute # squares for one time multi_level_squares = self.square_anchor_generator.grid_priors( featmap_sizes, device=device) squares_list = [multi_level_squares for _ in range(num_imgs)] # for each image, we compute multi level guided anchors guided_anchors_list = [] loc_mask_list = [] for img_id, img_meta in enumerate(img_metas): multi_level_guided_anchors = [] multi_level_loc_mask = [] for i in range(num_levels): squares = squares_list[img_id][i] shape_pred = shape_preds[i][img_id] loc_pred = loc_preds[i][img_id] guided_anchors, loc_mask = self._get_guided_anchors_single( squares, shape_pred, loc_pred, use_loc_filter=use_loc_filter) multi_level_guided_anchors.append(guided_anchors) multi_level_loc_mask.append(loc_mask) guided_anchors_list.append(multi_level_guided_anchors) loc_mask_list.append(multi_level_loc_mask) return squares_list, guided_anchors_list, loc_mask_list def _get_guided_anchors_single(self, squares, shape_pred, loc_pred, use_loc_filter=False): """Get guided anchors and loc masks for a single level. Args: square (tensor): Squares of a single level. shape_pred (tensor): Shape predictions of a single level. loc_pred (tensor): Loc predictions of a single level. use_loc_filter (list[tensor]): Use loc filter or not. Returns: tuple: guided anchors, location masks """ # calculate location filtering mask loc_pred = loc_pred.sigmoid().detach() if use_loc_filter: loc_mask = loc_pred >= self.loc_filter_thr else: loc_mask = loc_pred >= 0.0 mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_base_priors) mask = mask.contiguous().view(-1) # calculate guided anchors squares = squares[mask] anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view( -1, 2).detach()[mask] bbox_deltas = anchor_deltas.new_full(squares.size(), 0) bbox_deltas[:, 2:] = anchor_deltas guided_anchors = self.anchor_coder.decode( squares, bbox_deltas, wh_ratio_clip=1e-6) return guided_anchors, mask def ga_loc_targets(self, gt_bboxes_list, featmap_sizes): """Compute location targets for guided anchoring. Each feature map is divided into positive, negative and ignore regions. - positive regions: target 1, weight 1 - ignore regions: target 0, weight 0 - negative regions: target 0, weight 0.1 Args: gt_bboxes_list (list[Tensor]): Gt bboxes of each image. featmap_sizes (list[tuple]): Multi level sizes of each feature maps. Returns: tuple """ anchor_scale = self.approx_anchor_generator.octave_base_scale anchor_strides = self.approx_anchor_generator.strides # Currently only supports same stride in x and y direction. 
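        # Worked arithmetic for the region partition computed below
        # (illustrative; center_ratio=0.2 and ignore_ratio=0.5 are the
        # values typically used in guided-anchoring configs):
        # r1 = (1 - 0.2) / 2 = 0.4 and r2 = (1 - 0.5) / 2 = 0.25, so each
        # gt box keeps its middle 20% as the positive (center) region and
        # its middle 50% as the ignore region; everything else on the map
        # stays negative with weight 0.1. The equal-stride assumption is
        # checked first: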
for stride in anchor_strides: assert (stride[0] == stride[1]) anchor_strides = [stride[0] for stride in anchor_strides] center_ratio = self.train_cfg.center_ratio ignore_ratio = self.train_cfg.ignore_ratio img_per_gpu = len(gt_bboxes_list) num_lvls = len(featmap_sizes) r1 = (1 - center_ratio) / 2 r2 = (1 - ignore_ratio) / 2 all_loc_targets = [] all_loc_weights = [] all_ignore_map = [] for lvl_id in range(num_lvls): h, w = featmap_sizes[lvl_id] loc_targets = torch.zeros( img_per_gpu, 1, h, w, device=gt_bboxes_list[0].device, dtype=torch.float32) loc_weights = torch.full_like(loc_targets, -1) ignore_map = torch.zeros_like(loc_targets) all_loc_targets.append(loc_targets) all_loc_weights.append(loc_weights) all_ignore_map.append(ignore_map) for img_id in range(img_per_gpu): gt_bboxes = gt_bboxes_list[img_id] scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (gt_bboxes[:, 3] - gt_bboxes[:, 1])) min_anchor_size = scale.new_full( (1, ), float(anchor_scale * anchor_strides[0])) # assign gt bboxes to different feature levels w.r.t. their scales target_lvls = torch.floor( torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() for gt_id in range(gt_bboxes.size(0)): lvl = target_lvls[gt_id].item() # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl] # calculate ignore regions ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[lvl]) # calculate positive (center) regions ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region( gt_, r1, featmap_sizes[lvl]) all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1 all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 0 all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1 # calculate ignore map on nearby low level feature if lvl > 0: d_lvl = lvl - 1 # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl] ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[d_lvl]) all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1 # calculate ignore map on nearby high level feature if lvl < num_lvls - 1: u_lvl = lvl + 1 # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl] ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[u_lvl]) all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1 for lvl_id in range(num_lvls): # ignore negative regions w.r.t. ignore map all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0) & (all_ignore_map[lvl_id] > 0)] = 0 # set negative regions with weight 0.1 all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1 # loc average factor to balance loss loc_avg_factor = sum( [t.size(0) * t.size(-1) * t.size(-2) for t in all_loc_targets]) / 200 return all_loc_targets, all_loc_weights, loc_avg_factor def _ga_shape_target_single(self, flat_approxs, inside_flags, flat_squares, gt_bboxes, gt_bboxes_ignore, img_meta, unmap_outputs=True): """Compute guided anchoring targets. This function returns sampled anchors and gt bboxes directly rather than calculates regression targets. Args: flat_approxs (Tensor): flat approxs of a single image, shape (n, 4) inside_flags (Tensor): inside flags of a single image, shape (n, ). flat_squares (Tensor): flat squares of a single image, shape (approxs_per_octave * n, 4) gt_bboxes (Tensor): Ground truth bboxes of a single image. 
            gt_bboxes_ignore (Tensor): Ground truth bboxes of a single image
                to be ignored.
            img_meta (dict): Meta info of a single image.
            unmap_outputs (bool): unmap outputs or not.

        Returns:
            tuple: (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds)
        """
        if not inside_flags.any():
            return (None, ) * 5
        # assign gt and sample anchors
        expand_inside_flags = inside_flags[:, None].expand(
            -1, self.approxs_per_octave).reshape(-1)
        approxs = flat_approxs[expand_inside_flags, :]
        squares = flat_squares[inside_flags, :]

        assign_result = self.ga_assigner.assign(approxs, squares,
                                                self.approxs_per_octave,
                                                gt_bboxes, gt_bboxes_ignore)
        sampling_result = self.ga_sampler.sample(assign_result, squares,
                                                 gt_bboxes)

        bbox_anchors = torch.zeros_like(squares)
        bbox_gts = torch.zeros_like(squares)
        bbox_weights = torch.zeros_like(squares)

        pos_inds = sampling_result.pos_inds
        neg_inds = sampling_result.neg_inds
        if len(pos_inds) > 0:
            bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes
            bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes
            bbox_weights[pos_inds, :] = 1.0
        # map up to original set of anchors
        if unmap_outputs:
            num_total_anchors = flat_squares.size(0)
            bbox_anchors = unmap(bbox_anchors, num_total_anchors,
                                 inside_flags)
            bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags)
            bbox_weights = unmap(bbox_weights, num_total_anchors,
                                 inside_flags)

        return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds)

    def ga_shape_targets(self,
                         approx_list,
                         inside_flag_list,
                         square_list,
                         gt_bboxes_list,
                         img_metas,
                         gt_bboxes_ignore_list=None,
                         unmap_outputs=True):
        """Compute guided anchoring targets.

        Args:
            approx_list (list[list]): Multi level approxs of each image.
            inside_flag_list (list[list]): Multi level inside flags of each
                image.
            square_list (list[list]): Multi level squares of each image.
            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
            img_metas (list[dict]): Meta info of each image.
            gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes.
            unmap_outputs (bool): unmap outputs or not.

        Returns:
            tuple
        """
        num_imgs = len(img_metas)
        assert len(approx_list) == len(inside_flag_list) == len(
            square_list) == num_imgs
        # anchor number of multi levels
        num_level_squares = [squares.size(0) for squares in square_list[0]]
        # concat all level anchors and flags to a single tensor
        inside_flag_flat_list = []
        approx_flat_list = []
        square_flat_list = []
        for i in range(num_imgs):
            assert len(square_list[i]) == len(inside_flag_list[i])
            inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))
            approx_flat_list.append(torch.cat(approx_list[i]))
            square_flat_list.append(torch.cat(square_list[i]))
        # compute targets for each image
        if gt_bboxes_ignore_list is None:
            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
        (all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list,
         neg_inds_list) = multi_apply(
             self._ga_shape_target_single,
             approx_flat_list,
             inside_flag_flat_list,
             square_flat_list,
             gt_bboxes_list,
             gt_bboxes_ignore_list,
             img_metas,
             unmap_outputs=unmap_outputs)
        # no valid anchors
        if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]):
            return None
        # sampled anchors of all images
        num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
        num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
        # split targets to a list w.r.t.
multiple levels bbox_anchors_list = images_to_levels(all_bbox_anchors, num_level_squares) bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_squares) return (bbox_anchors_list, bbox_gts_list, bbox_weights_list, num_total_pos, num_total_neg) def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts, anchor_weights, anchor_total_num): shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2) bbox_anchors = bbox_anchors.contiguous().view(-1, 4) bbox_gts = bbox_gts.contiguous().view(-1, 4) anchor_weights = anchor_weights.contiguous().view(-1, 4) bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0) bbox_deltas[:, 2:] += shape_pred # filter out negative samples to speed-up weighted_bounded_iou_loss inds = torch.nonzero( anchor_weights[:, 0] > 0, as_tuple=False).squeeze(1) bbox_deltas_ = bbox_deltas[inds] bbox_anchors_ = bbox_anchors[inds] bbox_gts_ = bbox_gts[inds] anchor_weights_ = anchor_weights[inds] pred_anchors_ = self.anchor_coder.decode( bbox_anchors_, bbox_deltas_, wh_ratio_clip=1e-6) loss_shape = self.loss_shape( pred_anchors_, bbox_gts_, anchor_weights_, avg_factor=anchor_total_num) return loss_shape def loss_loc_single(self, loc_pred, loc_target, loc_weight, loc_avg_factor): loss_loc = self.loss_loc( loc_pred.reshape(-1, 1), loc_target.reshape(-1).long(), loc_weight.reshape(-1), avg_factor=loc_avg_factor) return loss_loc @force_fp32( apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds')) def loss(self, cls_scores, bbox_preds, shape_preds, loc_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.approx_anchor_generator.num_levels device = cls_scores[0].device # get loc targets loc_targets, loc_weights, loc_avg_factor = self.ga_loc_targets( gt_bboxes, featmap_sizes) # get sampled approxes approxs_list, inside_flag_list = self.get_sampled_approxs( featmap_sizes, img_metas, device=device) # get squares and guided anchors squares_list, guided_anchors_list, _ = self.get_anchors( featmap_sizes, shape_preds, loc_preds, img_metas, device=device) # get shape targets shape_targets = self.ga_shape_targets(approxs_list, inside_flag_list, squares_list, gt_bboxes, img_metas) if shape_targets is None: return None (bbox_anchors_list, bbox_gts_list, anchor_weights_list, anchor_fg_num, anchor_bg_num) = shape_targets anchor_total_num = ( anchor_fg_num if not self.ga_sampling else anchor_fg_num + anchor_bg_num) # get anchor targets label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( guided_anchors_list, inside_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = ( num_total_pos + num_total_neg if self.sampling else num_total_pos) # anchor number of multi levels num_level_anchors = [ anchors.size(0) for anchors in guided_anchors_list[0] ] # concat all level anchors to a single tensor concat_anchor_list = [] for i in range(len(guided_anchors_list)): concat_anchor_list.append(torch.cat(guided_anchors_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) # get classification and bbox regression losses losses_cls, losses_bbox = multi_apply( 
self.loss_single, cls_scores, bbox_preds, all_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_samples=num_total_samples) # get anchor location loss losses_loc = [] for i in range(len(loc_preds)): loss_loc = self.loss_loc_single( loc_preds[i], loc_targets[i], loc_weights[i], loc_avg_factor=loc_avg_factor) losses_loc.append(loss_loc) # get anchor shape loss losses_shape = [] for i in range(len(shape_preds)): loss_shape = self.loss_shape_single( shape_preds[i], bbox_anchors_list[i], bbox_gts_list[i], anchor_weights_list[i], anchor_total_num=anchor_total_num) losses_shape.append(loss_shape) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_shape=losses_shape, loss_loc=losses_loc) @force_fp32( apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds')) def get_bboxes(self, cls_scores, bbox_preds, shape_preds, loc_preds, img_metas, cfg=None, rescale=False): assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len( loc_preds) num_levels = len(cls_scores) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] device = cls_scores[0].device # get guided anchors _, guided_anchors, loc_masks = self.get_anchors( featmap_sizes, shape_preds, loc_preds, img_metas, use_loc_filter=not self.training, device=device) result_list = [] for img_id in range(len(img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_pred_list = [ bbox_preds[i][img_id].detach() for i in range(num_levels) ] guided_anchor_list = [ guided_anchors[img_id][i].detach() for i in range(num_levels) ] loc_mask_list = [ loc_masks[img_id][i].detach() for i in range(num_levels) ] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list, guided_anchor_list, loc_mask_list, img_shape, scale_factor, cfg, rescale) result_list.append(proposals) return result_list def _get_bboxes_single(self, cls_scores, bbox_preds, mlvl_anchors, mlvl_masks, img_shape, scale_factor, cfg, rescale=False): cfg = self.test_cfg if cfg is None else cfg assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) mlvl_bboxes = [] mlvl_scores = [] for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds, mlvl_anchors, mlvl_masks): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] # if no location is kept, end. if mask.sum() == 0: continue # reshape scores and bbox_pred cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1) bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) # filter scores, bbox_pred w.r.t. mask. # anchors are filtered in get_anchors() beforehand. scores = scores[mask, :] bbox_pred = bbox_pred[mask, :] if scores.dim() == 0: anchors = anchors.unsqueeze(0) scores = scores.unsqueeze(0) bbox_pred = bbox_pred.unsqueeze(0) # filter anchors, bbox_pred, scores w.r.t. 
scores
            nms_pre = cfg.get('nms_pre', -1)
            if nms_pre > 0 and scores.shape[0] > nms_pre:
                if self.use_sigmoid_cls:
                    max_scores, _ = scores.max(dim=1)
                else:
                    # remind that we set FG labels to [0, num_class-1]
                    # since mmdet v2.0
                    # BG cat_id: num_class
                    max_scores, _ = scores[:, :-1].max(dim=1)
                _, topk_inds = max_scores.topk(nms_pre)
                anchors = anchors[topk_inds, :]
                bbox_pred = bbox_pred[topk_inds, :]
                scores = scores[topk_inds, :]
            bboxes = self.bbox_coder.decode(
                anchors, bbox_pred, max_shape=img_shape)
            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)
        mlvl_bboxes = torch.cat(mlvl_bboxes)
        if rescale:
            mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
        mlvl_scores = torch.cat(mlvl_scores)
        if self.use_sigmoid_cls:
            # Add a dummy background class to the backend when using sigmoid
            # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
            # BG cat_id: num_class
            padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
            mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
        # multi class NMS
        det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
                                                cfg.score_thr, cfg.nms,
                                                cfg.max_per_img)
        return det_bboxes, det_labels


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/lad_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner import force_fp32

from mmdet.core import bbox_overlaps, multi_apply
from ..builder import HEADS
from .paa_head import PAAHead, levels_to_images


@HEADS.register_module()
class LADHead(PAAHead):
    """Label Assignment Head from the paper: `Improving Object Detection by
    Label Assignment Distillation <https://arxiv.org/abs/2108.10520>`_"""

    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))
    def get_label_assignment(self,
                             cls_scores,
                             bbox_preds,
                             iou_preds,
                             gt_bboxes,
                             gt_labels,
                             img_metas,
                             gt_bboxes_ignore=None):
        """Get label assignment (from teacher).

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level.
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            iou_preds (list[Tensor]): iou_preds for each scale level with
                shape (N, num_anchors * 1, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            tuple: Returns a tuple containing label assignment variables.

                - labels (Tensor): Labels of all anchors, each with
                    shape (num_anchors,).
                - labels_weight (Tensor): Label weights of all anchors,
                    each with shape (num_anchors,).
                - bboxes_target (Tensor): BBox targets of all anchors,
                    each with shape (num_anchors, 4).
                - bboxes_weight (Tensor): BBox weights of all anchors,
                    each with shape (num_anchors, 4).
                - pos_inds_flatten (Tensor): Indices of all positive
                    samples over all anchors.
                - pos_anchors (Tensor): Positive anchors.
                - num_pos (int): Number of positive anchors.
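# ---------------------------------------------------------------------------
# Editor's sketch (illustrative, not mmdet code): the pre-NMS top-k filter in
# `_get_bboxes_single` above. When a pyramid level yields more candidates than
# `cfg.nms_pre`, only the nms_pre anchors with the highest per-anchor class
# score survive to the (more expensive) multiclass NMS. The function name is
# hypothetical.
import torch

def prefilter_topk(scores, bboxes, nms_pre=1000):
    """scores: (n, num_classes); bboxes: (n, 4)."""
    if nms_pre > 0 and scores.shape[0] > nms_pre:
        max_scores, _ = scores.max(dim=1)        # best class score per anchor
        _, topk_inds = max_scores.topk(nms_pre)  # keep the strongest anchors
        return scores[topk_inds], bboxes[topk_inds]
    return scores, bboxes

s, b = prefilter_topk(torch.rand(5000, 80), torch.rand(5000, 4))
print(s.shape, b.shape)  # torch.Size([1000, 80]) torch.Size([1000, 4])
# ---------------------------------------------------------------------------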
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, ) (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds, pos_gt_index) = cls_reg_targets cls_scores = levels_to_images(cls_scores) cls_scores = [ item.reshape(-1, self.cls_out_channels) for item in cls_scores ] bbox_preds = levels_to_images(bbox_preds) bbox_preds = [item.reshape(-1, 4) for item in bbox_preds] pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list, cls_scores, bbox_preds, labels, labels_weight, bboxes_target, bboxes_weight, pos_inds) with torch.no_grad(): reassign_labels, reassign_label_weight, \ reassign_bbox_weights, num_pos = multi_apply( self.paa_reassign, pos_losses_list, labels, labels_weight, bboxes_weight, pos_inds, pos_gt_index, anchor_list) num_pos = sum(num_pos) # convert all tensor list to a flatten tensor labels = torch.cat(reassign_labels, 0).view(-1) flatten_anchors = torch.cat( [torch.cat(item, 0) for item in anchor_list]) labels_weight = torch.cat(reassign_label_weight, 0).view(-1) bboxes_target = torch.cat(bboxes_target, 0).view(-1, bboxes_target[0].size(-1)) pos_inds_flatten = ((labels >= 0) & (labels < self.num_classes)).nonzero().reshape(-1) if num_pos: pos_anchors = flatten_anchors[pos_inds_flatten] else: pos_anchors = None label_assignment_results = (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds_flatten, pos_anchors, num_pos) return label_assignment_results def forward_train(self, x, label_assignment_results, img_metas, gt_bboxes, gt_labels=None, gt_bboxes_ignore=None, **kwargs): """Forward train with the available label assignment (student receives from teacher). Args: x (list[Tensor]): Features from FPN. label_assignment_results (tuple): As the outputs defined in the function `self.get_label_assignment`. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). Returns: losses: (dict[str, Tensor]): A dictionary of loss components. """ outs = self(x) if gt_labels is None: loss_inputs = outs + (gt_bboxes, img_metas) else: loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) losses = self.loss( *loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore, label_assignment_results=label_assignment_results) return losses @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds')) def loss(self, cls_scores, bbox_preds, iou_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None, label_assignment_results=None): """Compute losses of the head. 
        Args:
            cls_scores (list[Tensor]): Box scores for each scale level.
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            iou_preds (list[Tensor]): iou_preds for each scale level with
                shape (N, num_anchors * 1, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
                boxes can be ignored when computing the loss.
            label_assignment_results (tuple): As the outputs defined in the
                function `self.get_label_assignment`.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        (labels, labels_weight, bboxes_target, bboxes_weight,
         pos_inds_flatten, pos_anchors, num_pos) = label_assignment_results

        cls_scores = levels_to_images(cls_scores)
        cls_scores = [
            item.reshape(-1, self.cls_out_channels) for item in cls_scores
        ]
        bbox_preds = levels_to_images(bbox_preds)
        bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]
        iou_preds = levels_to_images(iou_preds)
        iou_preds = [item.reshape(-1, 1) for item in iou_preds]

        # convert all tensor list to a flatten tensor
        cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1))
        bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1))
        iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1))

        losses_cls = self.loss_cls(
            cls_scores,
            labels,
            labels_weight,
            avg_factor=max(num_pos, len(img_metas)))  # avoid num_pos=0
        if num_pos:
            pos_bbox_pred = self.bbox_coder.decode(
                pos_anchors, bbox_preds[pos_inds_flatten])
            pos_bbox_target = bboxes_target[pos_inds_flatten]
            iou_target = bbox_overlaps(
                pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True)
            losses_iou = self.loss_centerness(
                iou_preds[pos_inds_flatten],
                iou_target.unsqueeze(-1),
                avg_factor=num_pos)
            losses_bbox = self.loss_bbox(
                pos_bbox_pred, pos_bbox_target, avg_factor=num_pos)
        else:
            losses_iou = iou_preds.sum() * 0
            losses_bbox = bbox_preds.sum() * 0

        return dict(
            loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou)


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/ld_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner import force_fp32

from mmdet.core import bbox_overlaps, multi_apply, reduce_mean
from ..builder import HEADS, build_loss
from .gfl_head import GFLHead


@HEADS.register_module()
class LDHead(GFLHead):
    """Localization distillation Head.

    It utilizes the learned bbox distributions to transfer the localization
    dark knowledge from teacher to student.

    Original paper: `Localization Distillation for Object Detection
    <https://arxiv.org/abs/2102.12252>`_

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        loss_ld (dict): Config of Localization Distillation Loss (LD),
            T is the temperature for distillation.
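# ---------------------------------------------------------------------------
# Editor's sketch of what a temperature-T localization distillation term
# computes conceptually (a hypothetical minimal version, not the actual
# LocalizationDistillationLoss): KL divergence between temperature-softened
# teacher and student corner distributions, rescaled by T^2 as is
# conventional for distillation losses.
import torch
import torch.nn.functional as F

def ld_loss_sketch(student_logits, teacher_logits, T=10.0):
    """Both inputs: (n, reg_max + 1) logits over discretised edge offsets."""
    p_teacher = F.softmax(teacher_logits / T, dim=-1)
    log_p_student = F.log_softmax(student_logits / T, dim=-1)
    return F.kl_div(log_p_student, p_teacher, reduction='batchmean') * T * T

print(ld_loss_sketch(torch.randn(8, 17), torch.randn(8, 17)))  # reg_max = 16
# ---------------------------------------------------------------------------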
""" def __init__(self, num_classes, in_channels, loss_ld=dict( type='LocalizationDistillationLoss', loss_weight=0.25, T=10), **kwargs): super(LDHead, self).__init__(num_classes, in_channels, **kwargs) self.loss_ld = build_loss(loss_ld) def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights, bbox_targets, stride, soft_targets, num_total_samples): """Compute loss of a single scale level. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). cls_score (Tensor): Cls and quality joint scores for each scale level has shape (N, num_classes, H, W). bbox_pred (Tensor): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). stride (tuple): Stride in this scale level. num_total_samples (int): Number of positive samples that is reduced over all GPUs. Returns: dict[tuple, Tensor]: Loss components and weight targets. """ assert stride[0] == stride[1], 'h stride is not equal to w stride!' anchors = anchors.reshape(-1, 4) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4 * (self.reg_max + 1)) soft_targets = soft_targets.permute(0, 2, 3, 1).reshape(-1, 4 * (self.reg_max + 1)) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1) score = label_weights.new_zeros(labels.shape) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0] weight_targets = cls_score.detach().sigmoid() weight_targets = weight_targets.max(dim=1)[0][pos_inds] pos_bbox_pred_corners = self.integral(pos_bbox_pred) pos_decode_bbox_pred = self.bbox_coder.decode( pos_anchor_centers, pos_bbox_pred_corners) pos_decode_bbox_targets = pos_bbox_targets / stride[0] score[pos_inds] = bbox_overlaps( pos_decode_bbox_pred.detach(), pos_decode_bbox_targets, is_aligned=True) pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1) pos_soft_targets = soft_targets[pos_inds] soft_corners = pos_soft_targets.reshape(-1, self.reg_max + 1) target_corners = self.bbox_coder.encode(pos_anchor_centers, pos_decode_bbox_targets, self.reg_max).reshape(-1) # regression loss loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_decode_bbox_targets, weight=weight_targets, avg_factor=1.0) # dfl loss loss_dfl = self.loss_dfl( pred_corners, target_corners, weight=weight_targets[:, None].expand(-1, 4).reshape(-1), avg_factor=4.0) # ld loss loss_ld = self.loss_ld( pred_corners, soft_corners, weight=weight_targets[:, None].expand(-1, 4).reshape(-1), avg_factor=4.0) else: loss_ld = bbox_pred.sum() * 0 loss_bbox = bbox_pred.sum() * 0 loss_dfl = bbox_pred.sum() * 0 weight_targets = bbox_pred.new_tensor(0) # cls (qfl) loss loss_cls = self.loss_cls( cls_score, (labels, score), weight=label_weights, avg_factor=num_total_samples) return loss_cls, loss_bbox, loss_dfl, loss_ld, weight_targets.sum() def forward_train(self, x, out_teacher, img_metas, gt_bboxes, 
gt_labels=None, gt_bboxes_ignore=None, proposal_cfg=None, **kwargs): """ Args: x (list[Tensor]): Features from FPN. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). proposal_cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used Returns: tuple[dict, list]: The loss components and proposals of each image. - losses (dict[str, Tensor]): A dictionary of loss components. - proposal_list (list[Tensor]): Proposals of each image. """ outs = self(x) soft_target = out_teacher[1] if gt_labels is None: loss_inputs = outs + (gt_bboxes, soft_target, img_metas) else: loss_inputs = outs + (gt_bboxes, gt_labels, soft_target, img_metas) losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) if proposal_cfg is None: return losses else: proposal_list = self.get_bboxes(*outs, img_metas, cfg=proposal_cfg) return losses, proposal_list @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, soft_target, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Cls and quality scores for each scale level has shape (N, num_classes, H, W). bbox_preds (list[Tensor]): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. 
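# ---------------------------------------------------------------------------
# Editor's sketch of the `self.integral(...)` step used by `loss_single`
# above (a hypothetical re-implementation, not mmdet's Integral module):
# GFL-style heads predict a discrete distribution over the distances
# {0, 1, ..., reg_max} for each of the four box edges and collapse it to a
# scalar offset by taking the expectation of that distribution.
import torch
import torch.nn.functional as F

def integral_sketch(bbox_pred_logits, reg_max=16):
    """(n, 4 * (reg_max + 1)) distribution logits -> (n, 4) expected offsets."""
    project = torch.arange(reg_max + 1, dtype=torch.float32)
    dist = F.softmax(bbox_pred_logits.reshape(-1, reg_max + 1), dim=-1)
    return (dist * project).sum(dim=-1).reshape(-1, 4)

print(integral_sketch(torch.randn(2, 4 * 17)).shape)  # torch.Size([2, 4])
# ---------------------------------------------------------------------------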
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = reduce_mean( torch.tensor(num_total_pos, dtype=torch.float, device=device)).item() num_total_samples = max(num_total_samples, 1.0) losses_cls, losses_bbox, losses_dfl, losses_ld, \ avg_factor = multi_apply( self.loss_single, anchor_list, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_targets_list, self.prior_generator.strides, soft_target, num_total_samples=num_total_samples) avg_factor = sum(avg_factor) + 1e-6 avg_factor = reduce_mean(avg_factor).item() losses_bbox = [x / avg_factor for x in losses_bbox] losses_dfl = [x / avg_factor for x in losses_dfl] return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl, loss_ld=losses_ld) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/mask2former_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import copy import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Conv2d, build_plugin_layer, caffe2_xavier_init from mmcv.cnn.bricks.transformer import (build_positional_encoding, build_transformer_layer_sequence) from mmcv.ops import point_sample from mmcv.runner import ModuleList from mmdet.core import build_assigner, build_sampler, reduce_mean from mmdet.models.utils import get_uncertain_point_coords_with_randomness from ..builder import HEADS, build_loss from .anchor_free_head import AnchorFreeHead from .maskformer_head import MaskFormerHead @HEADS.register_module() class Mask2FormerHead(MaskFormerHead): """Implements the Mask2Former head. See `Masked-attention Mask Transformer for Universal Image Segmentation `_ for details. Args: in_channels (list[int]): Number of channels in the input feature map. feat_channels (int): Number of channels for features. out_channels (int): Number of channels for output. num_things_classes (int): Number of things. num_stuff_classes (int): Number of stuff. num_queries (int): Number of query in Transformer decoder. pixel_decoder (:obj:`mmcv.ConfigDict` | dict): Config for pixel decoder. Defaults to None. enforce_decoder_input_project (bool, optional): Whether to add a layer to change the embed_dim of tranformer encoder in pixel decoder to the embed_dim of transformer decoder. Defaults to False. transformer_decoder (:obj:`mmcv.ConfigDict` | dict): Config for transformer decoder. Defaults to None. positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for transformer decoder position encoding. Defaults to None. loss_cls (:obj:`mmcv.ConfigDict` | dict): Config of the classification loss. Defaults to None. loss_mask (:obj:`mmcv.ConfigDict` | dict): Config of the mask loss. Defaults to None. loss_dice (:obj:`mmcv.ConfigDict` | dict): Config of the dice loss. Defaults to None. train_cfg (:obj:`mmcv.ConfigDict` | dict): Training config of Mask2Former head. 
test_cfg (:obj:`mmcv.ConfigDict` | dict): Testing config of Mask2Former head. init_cfg (dict or list[dict], optional): Initialization config dict. Defaults to None. """ def __init__(self, in_channels, feat_channels, out_channels, num_things_classes=80, num_stuff_classes=53, num_queries=100, num_transformer_feat_level=3, pixel_decoder=None, enforce_decoder_input_project=False, transformer_decoder=None, positional_encoding=None, loss_cls=None, loss_mask=None, loss_dice=None, train_cfg=None, test_cfg=None, init_cfg=None, **kwargs): super(AnchorFreeHead, self).__init__(init_cfg) self.num_things_classes = num_things_classes self.num_stuff_classes = num_stuff_classes self.num_classes = self.num_things_classes + self.num_stuff_classes self.num_queries = num_queries self.num_transformer_feat_level = num_transformer_feat_level self.num_heads = transformer_decoder.transformerlayers.\ attn_cfgs.num_heads self.num_transformer_decoder_layers = transformer_decoder.num_layers assert pixel_decoder.encoder.transformerlayers.\ attn_cfgs.num_levels == num_transformer_feat_level pixel_decoder_ = copy.deepcopy(pixel_decoder) pixel_decoder_.update( in_channels=in_channels, feat_channels=feat_channels, out_channels=out_channels) self.pixel_decoder = build_plugin_layer(pixel_decoder_)[1] self.transformer_decoder = build_transformer_layer_sequence( transformer_decoder) self.decoder_embed_dims = self.transformer_decoder.embed_dims self.decoder_input_projs = ModuleList() # from low resolution to high resolution for _ in range(num_transformer_feat_level): if (self.decoder_embed_dims != feat_channels or enforce_decoder_input_project): self.decoder_input_projs.append( Conv2d( feat_channels, self.decoder_embed_dims, kernel_size=1)) else: self.decoder_input_projs.append(nn.Identity()) self.decoder_positional_encoding = build_positional_encoding( positional_encoding) self.query_embed = nn.Embedding(self.num_queries, feat_channels) self.query_feat = nn.Embedding(self.num_queries, feat_channels) # from low resolution to high resolution self.level_embed = nn.Embedding(self.num_transformer_feat_level, feat_channels) self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1) self.mask_embed = nn.Sequential( nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), nn.Linear(feat_channels, out_channels)) self.test_cfg = test_cfg self.train_cfg = train_cfg if train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) self.sampler = build_sampler(self.train_cfg.sampler, context=self) self.num_points = self.train_cfg.get('num_points', 12544) self.oversample_ratio = self.train_cfg.get('oversample_ratio', 3.0) self.importance_sample_ratio = self.train_cfg.get( 'importance_sample_ratio', 0.75) self.class_weight = loss_cls.class_weight self.loss_cls = build_loss(loss_cls) self.loss_mask = build_loss(loss_mask) self.loss_dice = build_loss(loss_dice) def init_weights(self): for m in self.decoder_input_projs: if isinstance(m, Conv2d): caffe2_xavier_init(m, bias=0) self.pixel_decoder.init_weights() for p in self.transformer_decoder.parameters(): if p.dim() > 1: nn.init.xavier_normal_(p) def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks, img_metas): """Compute classification and mask targets for one image. Args: cls_score (Tensor): Mask score logits from a single decoder layer for one image. Shape (num_queries, cls_out_channels). mask_pred (Tensor): Mask logits for a single decoder layer for one image. Shape (num_queries, h, w). 
gt_labels (Tensor): Ground truth class indices for one image with shape (num_gts, ). gt_masks (Tensor): Ground truth mask for each image, each with shape (num_gts, h, w). img_metas (dict): Image informtation. Returns: tuple[Tensor]: A tuple containing the following for one image. - labels (Tensor): Labels of each image. \ shape (num_queries, ). - label_weights (Tensor): Label weights of each image. \ shape (num_queries, ). - mask_targets (Tensor): Mask targets of each image. \ shape (num_queries, h, w). - mask_weights (Tensor): Mask weights of each image. \ shape (num_queries, ). - pos_inds (Tensor): Sampled positive indices for each \ image. - neg_inds (Tensor): Sampled negative indices for each \ image. """ # sample points num_queries = cls_score.shape[0] num_gts = gt_labels.shape[0] point_coords = torch.rand((1, self.num_points, 2), device=cls_score.device) # shape (num_queries, num_points) mask_points_pred = point_sample( mask_pred.unsqueeze(1), point_coords.repeat(num_queries, 1, 1)).squeeze(1) # shape (num_gts, num_points) gt_points_masks = point_sample( gt_masks.unsqueeze(1).float(), point_coords.repeat(num_gts, 1, 1)).squeeze(1) # assign and sample assign_result = self.assigner.assign(cls_score, mask_points_pred, gt_labels, gt_points_masks, img_metas) sampling_result = self.sampler.sample(assign_result, mask_pred, gt_masks) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds # label target labels = gt_labels.new_full((self.num_queries, ), self.num_classes, dtype=torch.long) labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] label_weights = gt_labels.new_ones((self.num_queries, )) # mask target mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds] mask_weights = mask_pred.new_zeros((self.num_queries, )) mask_weights[pos_inds] = 1.0 return (labels, label_weights, mask_targets, mask_weights, pos_inds, neg_inds) def loss_single(self, cls_scores, mask_preds, gt_labels_list, gt_masks_list, img_metas): """Loss function for outputs from a single decoder layer. Args: cls_scores (Tensor): Mask score logits from a single decoder layer for all images. Shape (batch_size, num_queries, cls_out_channels). Note `cls_out_channels` should includes background. mask_preds (Tensor): Mask logits for a pixel decoder for all images. Shape (batch_size, num_queries, h, w). gt_labels_list (list[Tensor]): Ground truth class indices for each image, each with shape (num_gts, ). gt_masks_list (list[Tensor]): Ground truth mask for each image, each with shape (num_gts, h, w). img_metas (list[dict]): List of image meta information. Returns: tuple[Tensor]: Loss components for outputs from a single \ decoder layer. 
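# ---------------------------------------------------------------------------
# Editor's sketch (standalone, not the mmcv implementation) of the
# `point_sample` call used in `_get_target_single` above: instead of
# comparing predicted and ground-truth masks at every pixel, both are
# bilinearly sampled at a small set of random points, which keeps the
# matching cost independent of mask resolution. mmcv's point_sample is
# essentially `grid_sample` over [0, 1]-normalised point coordinates.
import torch
import torch.nn.functional as F

def point_sample_sketch(masks, point_coords):
    """masks: (n, 1, h, w); point_coords: (n, p, 2) as (x, y) in [0, 1]."""
    grid = 2.0 * point_coords - 1.0  # map to grid_sample's [-1, 1] range
    out = F.grid_sample(masks, grid.unsqueeze(2), align_corners=False)
    return out.squeeze(3).squeeze(1)  # (n, p) sampled mask values

print(point_sample_sketch(torch.rand(3, 1, 64, 64),
                          torch.rand(3, 128, 2)).shape)  # torch.Size([3, 128])
# ---------------------------------------------------------------------------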
""" num_imgs = cls_scores.size(0) cls_scores_list = [cls_scores[i] for i in range(num_imgs)] mask_preds_list = [mask_preds[i] for i in range(num_imgs)] (labels_list, label_weights_list, mask_targets_list, mask_weights_list, num_total_pos, num_total_neg) = self.get_targets(cls_scores_list, mask_preds_list, gt_labels_list, gt_masks_list, img_metas) # shape (batch_size, num_queries) labels = torch.stack(labels_list, dim=0) # shape (batch_size, num_queries) label_weights = torch.stack(label_weights_list, dim=0) # shape (num_total_gts, h, w) mask_targets = torch.cat(mask_targets_list, dim=0) # shape (batch_size, num_queries) mask_weights = torch.stack(mask_weights_list, dim=0) # classfication loss # shape (batch_size * num_queries, ) cls_scores = cls_scores.flatten(0, 1) labels = labels.flatten(0, 1) label_weights = label_weights.flatten(0, 1) class_weight = cls_scores.new_tensor(self.class_weight) loss_cls = self.loss_cls( cls_scores, labels, label_weights, avg_factor=class_weight[labels].sum()) num_total_masks = reduce_mean(cls_scores.new_tensor([num_total_pos])) num_total_masks = max(num_total_masks, 1) # extract positive ones # shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w) mask_preds = mask_preds[mask_weights > 0] if mask_targets.shape[0] == 0: # zero match loss_dice = mask_preds.sum() loss_mask = mask_preds.sum() return loss_cls, loss_mask, loss_dice with torch.no_grad(): points_coords = get_uncertain_point_coords_with_randomness( mask_preds.unsqueeze(1), None, self.num_points, self.oversample_ratio, self.importance_sample_ratio) # shape (num_total_gts, h, w) -> (num_total_gts, num_points) mask_point_targets = point_sample( mask_targets.unsqueeze(1).float(), points_coords).squeeze(1) # shape (num_queries, h, w) -> (num_queries, num_points) mask_point_preds = point_sample( mask_preds.unsqueeze(1), points_coords).squeeze(1) # dice loss loss_dice = self.loss_dice( mask_point_preds, mask_point_targets, avg_factor=num_total_masks) # mask loss # shape (num_queries, num_points) -> (num_queries * num_points, ) mask_point_preds = mask_point_preds.reshape(-1) # shape (num_total_gts, num_points) -> (num_total_gts * num_points, ) mask_point_targets = mask_point_targets.reshape(-1) loss_mask = self.loss_mask( mask_point_preds, mask_point_targets, avg_factor=num_total_masks * self.num_points) return loss_cls, loss_mask, loss_dice def forward_head(self, decoder_out, mask_feature, attn_mask_target_size): """Forward for head part which is called after every decoder layer. Args: decoder_out (Tensor): in shape (num_queries, batch_size, c). mask_feature (Tensor): in shape (batch_size, c, h, w). attn_mask_target_size (tuple[int, int]): target attention mask size. Returns: tuple: A tuple contain three elements. - cls_pred (Tensor): Classification scores in shape \ (batch_size, num_queries, cls_out_channels). \ Note `cls_out_channels` should includes background. - mask_pred (Tensor): Mask scores in shape \ (batch_size, num_queries,h, w). - attn_mask (Tensor): Attention mask in shape \ (batch_size * num_heads, num_queries, h, w). 
""" decoder_out = self.transformer_decoder.post_norm(decoder_out) decoder_out = decoder_out.transpose(0, 1) # shape (batch_size, num_queries, c) cls_pred = self.cls_embed(decoder_out) # shape (batch_size, num_queries, c) mask_embed = self.mask_embed(decoder_out) # shape (batch_size, num_queries, h, w) mask_pred = torch.einsum('bqc,bchw->bqhw', mask_embed, mask_feature) attn_mask = F.interpolate( mask_pred, attn_mask_target_size, mode='bilinear', align_corners=False) # shape (batch_size, num_queries, h, w) -> # (batch_size * num_head, num_queries, h*w) attn_mask = attn_mask.flatten(2).unsqueeze(1).repeat( (1, self.num_heads, 1, 1)).flatten(0, 1) attn_mask = attn_mask.sigmoid() < 0.5 attn_mask = attn_mask.detach() return cls_pred, mask_pred, attn_mask def forward(self, feats, img_metas): """Forward function. Args: feats (list[Tensor]): Multi scale Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. Returns: tuple: A tuple contains two elements. - cls_pred_list (list[Tensor)]: Classification logits \ for each decoder layer. Each is a 3D-tensor with shape \ (batch_size, num_queries, cls_out_channels). \ Note `cls_out_channels` should includes background. - mask_pred_list (list[Tensor]): Mask logits for each \ decoder layer. Each with shape (batch_size, num_queries, \ h, w). """ batch_size = len(img_metas) mask_features, multi_scale_memorys = self.pixel_decoder(feats) # multi_scale_memorys (from low resolution to high resolution) decoder_inputs = [] decoder_positional_encodings = [] for i in range(self.num_transformer_feat_level): decoder_input = self.decoder_input_projs[i](multi_scale_memorys[i]) # shape (batch_size, c, h, w) -> (h*w, batch_size, c) decoder_input = decoder_input.flatten(2).permute(2, 0, 1) level_embed = self.level_embed.weight[i].view(1, 1, -1) decoder_input = decoder_input + level_embed # shape (batch_size, c, h, w) -> (h*w, batch_size, c) mask = decoder_input.new_zeros( (batch_size, ) + multi_scale_memorys[i].shape[-2:], dtype=torch.bool) decoder_positional_encoding = self.decoder_positional_encoding( mask) decoder_positional_encoding = decoder_positional_encoding.flatten( 2).permute(2, 0, 1) decoder_inputs.append(decoder_input) decoder_positional_encodings.append(decoder_positional_encoding) # shape (num_queries, c) -> (num_queries, batch_size, c) query_feat = self.query_feat.weight.unsqueeze(1).repeat( (1, batch_size, 1)) query_embed = self.query_embed.weight.unsqueeze(1).repeat( (1, batch_size, 1)) cls_pred_list = [] mask_pred_list = [] cls_pred, mask_pred, attn_mask = self.forward_head( query_feat, mask_features, multi_scale_memorys[0].shape[-2:]) cls_pred_list.append(cls_pred) mask_pred_list.append(mask_pred) for i in range(self.num_transformer_decoder_layers): level_idx = i % self.num_transformer_feat_level # if a mask is all True(all background), then set it all False. 
attn_mask[torch.where( attn_mask.sum(-1) == attn_mask.shape[-1])] = False # cross_attn + self_attn layer = self.transformer_decoder.layers[i] attn_masks = [attn_mask, None] query_feat = layer( query=query_feat, key=decoder_inputs[level_idx], value=decoder_inputs[level_idx], query_pos=query_embed, key_pos=decoder_positional_encodings[level_idx], attn_masks=attn_masks, query_key_padding_mask=None, # here we do not apply masking on padded region key_padding_mask=None) cls_pred, mask_pred, attn_mask = self.forward_head( query_feat, mask_features, multi_scale_memorys[ (i + 1) % self.num_transformer_feat_level].shape[-2:]) cls_pred_list.append(cls_pred) mask_pred_list.append(mask_pred) return cls_pred_list, mask_pred_list ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/maskformer_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Conv2d, build_plugin_layer, caffe2_xavier_init from mmcv.cnn.bricks.transformer import (build_positional_encoding, build_transformer_layer_sequence) from mmcv.runner import force_fp32 from mmdet.core import build_assigner, build_sampler, multi_apply, reduce_mean from mmdet.models.utils import preprocess_panoptic_gt from ..builder import HEADS, build_loss from .anchor_free_head import AnchorFreeHead @HEADS.register_module() class MaskFormerHead(AnchorFreeHead): """Implements the MaskFormer head. See `Per-Pixel Classification is Not All You Need for Semantic Segmentation `_ for details. Args: in_channels (list[int]): Number of channels in the input feature map. feat_channels (int): Number of channels for feature. out_channels (int): Number of channels for output. num_things_classes (int): Number of things. num_stuff_classes (int): Number of stuff. num_queries (int): Number of query in Transformer. pixel_decoder (:obj:`mmcv.ConfigDict` | dict): Config for pixel decoder. Defaults to None. enforce_decoder_input_project (bool, optional): Whether to add a layer to change the embed_dim of tranformer encoder in pixel decoder to the embed_dim of transformer decoder. Defaults to False. transformer_decoder (:obj:`mmcv.ConfigDict` | dict): Config for transformer decoder. Defaults to None. positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for transformer decoder position encoding. Defaults to None. loss_cls (:obj:`mmcv.ConfigDict` | dict): Config of the classification loss. Defaults to `CrossEntropyLoss`. loss_mask (:obj:`mmcv.ConfigDict` | dict): Config of the mask loss. Defaults to `FocalLoss`. loss_dice (:obj:`mmcv.ConfigDict` | dict): Config of the dice loss. Defaults to `DiceLoss`. train_cfg (:obj:`mmcv.ConfigDict` | dict): Training config of Maskformer head. test_cfg (:obj:`mmcv.ConfigDict` | dict): Testing config of Maskformer head. init_cfg (dict or list[dict], optional): Initialization config dict. Defaults to None. 
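# ---------------------------------------------------------------------------
# Editor's note on the default `loss_cls` configured in `__init__` below: its
# class_weight of [1.0] * 133 + [0.1] down-weights the appended "no object"
# background class (133 = 80 things + 53 stuff for COCO panoptic), so the
# many unmatched queries do not dominate the classification loss. A minimal
# standalone illustration with 3 foreground classes:
import torch
import torch.nn.functional as F

num_classes = 3
class_weight = torch.tensor([1.0] * num_classes + [0.1])  # last = background
logits = torch.randn(5, num_classes + 1)                  # 5 queries
labels = torch.tensor([0, 2, 3, 3, 3])                    # 3 = "no object"
print(F.cross_entropy(logits, labels, weight=class_weight))
# ---------------------------------------------------------------------------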
""" def __init__(self, in_channels, feat_channels, out_channels, num_things_classes=80, num_stuff_classes=53, num_queries=100, pixel_decoder=None, enforce_decoder_input_project=False, transformer_decoder=None, positional_encoding=None, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0, class_weight=[1.0] * 133 + [0.1]), loss_mask=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=20.0), loss_dice=dict( type='DiceLoss', use_sigmoid=True, activate=True, naive_dice=True, loss_weight=1.0), train_cfg=None, test_cfg=None, init_cfg=None, **kwargs): super(AnchorFreeHead, self).__init__(init_cfg) self.num_things_classes = num_things_classes self.num_stuff_classes = num_stuff_classes self.num_classes = self.num_things_classes + self.num_stuff_classes self.num_queries = num_queries pixel_decoder.update( in_channels=in_channels, feat_channels=feat_channels, out_channels=out_channels) self.pixel_decoder = build_plugin_layer(pixel_decoder)[1] self.transformer_decoder = build_transformer_layer_sequence( transformer_decoder) self.decoder_embed_dims = self.transformer_decoder.embed_dims pixel_decoder_type = pixel_decoder.get('type') if pixel_decoder_type == 'PixelDecoder' and ( self.decoder_embed_dims != in_channels[-1] or enforce_decoder_input_project): self.decoder_input_proj = Conv2d( in_channels[-1], self.decoder_embed_dims, kernel_size=1) else: self.decoder_input_proj = nn.Identity() self.decoder_pe = build_positional_encoding(positional_encoding) self.query_embed = nn.Embedding(self.num_queries, out_channels) self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1) self.mask_embed = nn.Sequential( nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), nn.Linear(feat_channels, out_channels)) self.test_cfg = test_cfg self.train_cfg = train_cfg if train_cfg: self.assigner = build_assigner(train_cfg.get('assigner', None)) self.sampler = build_sampler( train_cfg.get('sampler', None), context=self) self.class_weight = loss_cls.get('class_weight', None) self.loss_cls = build_loss(loss_cls) self.loss_mask = build_loss(loss_mask) self.loss_dice = build_loss(loss_dice) def init_weights(self): if isinstance(self.decoder_input_proj, Conv2d): caffe2_xavier_init(self.decoder_input_proj, bias=0) self.pixel_decoder.init_weights() for p in self.transformer_decoder.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) def preprocess_gt(self, gt_labels_list, gt_masks_list, gt_semantic_segs, img_metas): """Preprocess the ground truth for all images. Args: gt_labels_list (list[Tensor]): Each is ground truth labels of each bbox, with shape (num_gts, ). gt_masks_list (list[BitmapMasks]): Each is ground truth masks of each instances of a image, shape (num_gts, h, w). gt_semantic_seg (Tensor | None): Ground truth of semantic segmentation with the shape (batch_size, n, h, w). [0, num_thing_class - 1] means things, [num_thing_class, num_class-1] means stuff, 255 means VOID. It's None when training instance segmentation. img_metas (list[dict]): List of image meta information. Returns: tuple: a tuple containing the following targets. - labels (list[Tensor]): Ground truth class indices\ for all images. Each with shape (n, ), n is the sum of\ number of stuff type and number of instance in a image. - masks (list[Tensor]): Ground truth mask for each\ image, each with shape (n, h, w). 
""" num_things_list = [self.num_things_classes] * len(gt_labels_list) num_stuff_list = [self.num_stuff_classes] * len(gt_labels_list) if gt_semantic_segs is None: gt_semantic_segs = [None] * len(gt_labels_list) targets = multi_apply(preprocess_panoptic_gt, gt_labels_list, gt_masks_list, gt_semantic_segs, num_things_list, num_stuff_list, img_metas) labels, masks = targets return labels, masks def get_targets(self, cls_scores_list, mask_preds_list, gt_labels_list, gt_masks_list, img_metas): """Compute classification and mask targets for all images for a decoder layer. Args: cls_scores_list (list[Tensor]): Mask score logits from a single decoder layer for all images. Each with shape (num_queries, cls_out_channels). mask_preds_list (list[Tensor]): Mask logits from a single decoder layer for all images. Each with shape (num_queries, h, w). gt_labels_list (list[Tensor]): Ground truth class indices for all images. Each with shape (n, ), n is the sum of number of stuff type and number of instance in a image. gt_masks_list (list[Tensor]): Ground truth mask for each image, each with shape (n, h, w). img_metas (list[dict]): List of image meta information. Returns: tuple[list[Tensor]]: a tuple containing the following targets. - labels_list (list[Tensor]): Labels of all images.\ Each with shape (num_queries, ). - label_weights_list (list[Tensor]): Label weights\ of all images. Each with shape (num_queries, ). - mask_targets_list (list[Tensor]): Mask targets of\ all images. Each with shape (num_queries, h, w). - mask_weights_list (list[Tensor]): Mask weights of\ all images. Each with shape (num_queries, ). - num_total_pos (int): Number of positive samples in\ all images. - num_total_neg (int): Number of negative samples in\ all images. """ (labels_list, label_weights_list, mask_targets_list, mask_weights_list, pos_inds_list, neg_inds_list) = multi_apply(self._get_target_single, cls_scores_list, mask_preds_list, gt_labels_list, gt_masks_list, img_metas) num_total_pos = sum((inds.numel() for inds in pos_inds_list)) num_total_neg = sum((inds.numel() for inds in neg_inds_list)) return (labels_list, label_weights_list, mask_targets_list, mask_weights_list, num_total_pos, num_total_neg) def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks, img_metas): """Compute classification and mask targets for one image. Args: cls_score (Tensor): Mask score logits from a single decoder layer for one image. Shape (num_queries, cls_out_channels). mask_pred (Tensor): Mask logits for a single decoder layer for one image. Shape (num_queries, h, w). gt_labels (Tensor): Ground truth class indices for one image with shape (n, ). n is the sum of number of stuff type and number of instance in a image. gt_masks (Tensor): Ground truth mask for each image, each with shape (n, h, w). img_metas (dict): Image informtation. Returns: tuple[Tensor]: a tuple containing the following for one image. - labels (Tensor): Labels of each image. shape (num_queries, ). - label_weights (Tensor): Label weights of each image. shape (num_queries, ). - mask_targets (Tensor): Mask targets of each image. shape (num_queries, h, w). - mask_weights (Tensor): Mask weights of each image. shape (num_queries, ). - pos_inds (Tensor): Sampled positive indices for each image. - neg_inds (Tensor): Sampled negative indices for each image. 
""" target_shape = mask_pred.shape[-2:] if gt_masks.shape[0] > 0: gt_masks_downsampled = F.interpolate( gt_masks.unsqueeze(1).float(), target_shape, mode='nearest').squeeze(1).long() else: gt_masks_downsampled = gt_masks # assign and sample assign_result = self.assigner.assign(cls_score, mask_pred, gt_labels, gt_masks_downsampled, img_metas) sampling_result = self.sampler.sample(assign_result, mask_pred, gt_masks) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds # label target labels = gt_labels.new_full((self.num_queries, ), self.num_classes, dtype=torch.long) labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] label_weights = gt_labels.new_ones(self.num_queries) # mask target mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds] mask_weights = mask_pred.new_zeros((self.num_queries, )) mask_weights[pos_inds] = 1.0 return (labels, label_weights, mask_targets, mask_weights, pos_inds, neg_inds) @force_fp32(apply_to=('all_cls_scores', 'all_mask_preds')) def loss(self, all_cls_scores, all_mask_preds, gt_labels_list, gt_masks_list, img_metas): """Loss function. Args: all_cls_scores (Tensor): Classification scores for all decoder layers with shape (num_decoder, batch_size, num_queries, cls_out_channels). Note `cls_out_channels` should includes background. all_mask_preds (Tensor): Mask scores for all decoder layers with shape (num_decoder, batch_size, num_queries, h, w). gt_labels_list (list[Tensor]): Ground truth class indices for each image with shape (n, ). n is the sum of number of stuff type and number of instance in a image. gt_masks_list (list[Tensor]): Ground truth mask for each image with shape (n, h, w). img_metas (list[dict]): List of image meta information. Returns: dict[str, Tensor]: A dictionary of loss components. """ num_dec_layers = len(all_cls_scores) all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] all_gt_masks_list = [gt_masks_list for _ in range(num_dec_layers)] img_metas_list = [img_metas for _ in range(num_dec_layers)] losses_cls, losses_mask, losses_dice = multi_apply( self.loss_single, all_cls_scores, all_mask_preds, all_gt_labels_list, all_gt_masks_list, img_metas_list) loss_dict = dict() # loss from the last decoder layer loss_dict['loss_cls'] = losses_cls[-1] loss_dict['loss_mask'] = losses_mask[-1] loss_dict['loss_dice'] = losses_dice[-1] # loss from other decoder layers num_dec_layer = 0 for loss_cls_i, loss_mask_i, loss_dice_i in zip( losses_cls[:-1], losses_mask[:-1], losses_dice[:-1]): loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i loss_dict[f'd{num_dec_layer}.loss_mask'] = loss_mask_i loss_dict[f'd{num_dec_layer}.loss_dice'] = loss_dice_i num_dec_layer += 1 return loss_dict def loss_single(self, cls_scores, mask_preds, gt_labels_list, gt_masks_list, img_metas): """Loss function for outputs from a single decoder layer. Args: cls_scores (Tensor): Mask score logits from a single decoder layer for all images. Shape (batch_size, num_queries, cls_out_channels). Note `cls_out_channels` should includes background. mask_preds (Tensor): Mask logits for a pixel decoder for all images. Shape (batch_size, num_queries, h, w). gt_labels_list (list[Tensor]): Ground truth class indices for each image, each with shape (n, ). n is the sum of number of stuff types and number of instances in a image. gt_masks_list (list[Tensor]): Ground truth mask for each image, each with shape (n, h, w). img_metas (list[dict]): List of image meta information. 
Returns: tuple[Tensor]: Loss components for outputs from a single decoder\ layer. """ num_imgs = cls_scores.size(0) cls_scores_list = [cls_scores[i] for i in range(num_imgs)] mask_preds_list = [mask_preds[i] for i in range(num_imgs)] (labels_list, label_weights_list, mask_targets_list, mask_weights_list, num_total_pos, num_total_neg) = self.get_targets(cls_scores_list, mask_preds_list, gt_labels_list, gt_masks_list, img_metas) # shape (batch_size, num_queries) labels = torch.stack(labels_list, dim=0) # shape (batch_size, num_queries) label_weights = torch.stack(label_weights_list, dim=0) # shape (num_total_gts, h, w) mask_targets = torch.cat(mask_targets_list, dim=0) # shape (batch_size, num_queries) mask_weights = torch.stack(mask_weights_list, dim=0) # classification loss # shape (batch_size * num_queries, ) cls_scores = cls_scores.flatten(0, 1) labels = labels.flatten(0, 1) label_weights = label_weights.flatten(0, 1) class_weight = cls_scores.new_tensor(self.class_weight) loss_cls = self.loss_cls( cls_scores, labels, label_weights, avg_factor=class_weight[labels].sum()) num_total_masks = reduce_mean(cls_scores.new_tensor([num_total_pos])) num_total_masks = max(num_total_masks, 1) # extract positive ones # shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w) mask_preds = mask_preds[mask_weights > 0] target_shape = mask_targets.shape[-2:] if mask_targets.shape[0] == 0: # zero match loss_dice = mask_preds.sum() loss_mask = mask_preds.sum() return loss_cls, loss_mask, loss_dice # upsample to shape of target # shape (num_total_gts, h, w) mask_preds = F.interpolate( mask_preds.unsqueeze(1), target_shape, mode='bilinear', align_corners=False).squeeze(1) # dice loss loss_dice = self.loss_dice( mask_preds, mask_targets, avg_factor=num_total_masks) # mask loss # FocalLoss supports input of shape (n, num_class) h, w = mask_preds.shape[-2:] # shape (num_total_gts, h, w) -> (num_total_gts * h * w, 1) mask_preds = mask_preds.reshape(-1, 1) # shape (num_total_gts, h, w) -> (num_total_gts * h * w) mask_targets = mask_targets.reshape(-1) # target is (1 - mask_targets) !!! loss_mask = self.loss_mask( mask_preds, 1 - mask_targets, avg_factor=num_total_masks * h * w) return loss_cls, loss_mask, loss_dice def forward(self, feats, img_metas): """Forward function. Args: feats (list[Tensor]): Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. Returns: tuple: a tuple containing two elements. - all_cls_scores (Tensor): Classification scores for each\ scale level. Each is a 4D-tensor with shape\ (num_decoder, batch_size, num_queries, cls_out_channels).\ Note `cls_out_channels` should include the background class. - all_mask_preds (Tensor): Mask scores for each decoder\ layer. Each with shape (num_decoder, batch_size,\ num_queries, h, w). """ batch_size = len(img_metas) input_img_h, input_img_w = img_metas[0]['batch_input_shape'] padding_mask = feats[-1].new_ones( (batch_size, input_img_h, input_img_w), dtype=torch.float32) for i in range(batch_size): img_h, img_w, _ = img_metas[i]['img_shape'] padding_mask[i, :img_h, :img_w] = 0 padding_mask = F.interpolate( padding_mask.unsqueeze(1), size=feats[-1].shape[-2:], mode='nearest').to(torch.bool).squeeze(1) # when backbone is swin, memory is output of last stage of swin. # when backbone is r50, memory is output of transformer encoder.
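        # --- Illustrative sketch (editor's addition, not original code) ---
        # The padding mask built above marks padded pixels with 1 and valid
        # pixels with 0, then is nearest-downsampled to the coarsest feature
        # map so the transformer ignores padded keys. On a toy batch of two
        # images padded to 8x8, with valid regions 8x6 and 4x8:
        #
        #   >>> import torch
        #   >>> import torch.nn.functional as F
        #   >>> m = torch.ones((2, 8, 8))
        #   >>> m[0, :8, :6] = 0
        #   >>> m[1, :4, :8] = 0
        #   >>> m = F.interpolate(m.unsqueeze(1), size=(2, 2),
        #   ...                   mode='nearest').to(torch.bool).squeeze(1)
        #   >>> m.shape
        #   torch.Size([2, 2, 2])
        # --- end sketch ---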
mask_features, memory = self.pixel_decoder(feats, img_metas) pos_embed = self.decoder_pe(padding_mask) memory = self.decoder_input_proj(memory) # shape (batch_size, c, h, w) -> (h*w, batch_size, c) memory = memory.flatten(2).permute(2, 0, 1) pos_embed = pos_embed.flatten(2).permute(2, 0, 1) # shape (batch_size, h * w) padding_mask = padding_mask.flatten(1) # shape = (num_queries, embed_dims) query_embed = self.query_embed.weight # shape = (num_queries, batch_size, embed_dims) query_embed = query_embed.unsqueeze(1).repeat(1, batch_size, 1) target = torch.zeros_like(query_embed) # shape (num_decoder, num_queries, batch_size, embed_dims) out_dec = self.transformer_decoder( query=target, key=memory, value=memory, key_pos=pos_embed, query_pos=query_embed, key_padding_mask=padding_mask) # shape (num_decoder, batch_size, num_queries, embed_dims) out_dec = out_dec.transpose(1, 2) # cls_scores all_cls_scores = self.cls_embed(out_dec) # mask_preds mask_embed = self.mask_embed(out_dec) all_mask_preds = torch.einsum('lbqc,bchw->lbqhw', mask_embed, mask_features) return all_cls_scores, all_mask_preds def forward_train(self, feats, img_metas, gt_bboxes, gt_labels, gt_masks, gt_semantic_seg, gt_bboxes_ignore=None): """Forward function for training mode. Args: feats (list[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. img_metas (list[Dict]): List of image information. gt_bboxes (list[Tensor]): Each element is ground truth bboxes of the image, shape (num_gts, 4). Not used here. gt_labels (list[Tensor]): Each element is ground truth labels of each box, shape (num_gts,). gt_masks (list[BitmapMasks]): Each element is masks of instances of an image, shape (num_gts, h, w). gt_semantic_seg (list[tensor] | None): Each element is the ground truth of semantic segmentation with the shape (N, H, W). [0, num_thing_class - 1] means things, [num_thing_class, num_class-1] means stuff, 255 means VOID. It's None when training instance segmentation. gt_bboxes_ignore (list[Tensor]): Ground truth bboxes to be ignored. Defaults to None. Returns: dict[str, Tensor]: a dictionary of loss components """ # ignored gt bboxes are not considered assert gt_bboxes_ignore is None # forward all_cls_scores, all_mask_preds = self(feats, img_metas) # preprocess ground truth gt_labels, gt_masks = self.preprocess_gt(gt_labels, gt_masks, gt_semantic_seg, img_metas) # loss losses = self.loss(all_cls_scores, all_mask_preds, gt_labels, gt_masks, img_metas) return losses def simple_test(self, feats, img_metas, **kwargs): """Test without augmentation. Args: feats (list[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. Returns: tuple: A tuple containing two tensors. - mask_cls_results (Tensor): Mask classification logits,\ shape (batch_size, num_queries, cls_out_channels). Note `cls_out_channels` should include the background class. - mask_pred_results (Tensor): Mask logits, shape \ (batch_size, num_queries, h, w).
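            Example (an illustrative addition, not in the original
            docstring): assuming 6 decoder layers, batch size 2 and 100
            queries, ``all_cls_scores`` from the forward pass has shape
            (6, 2, 100, cls_out_channels); only the last decoder layer is
            kept, so ``mask_cls_results`` has shape
            (2, 100, cls_out_channels), and ``mask_pred_results`` is
            bilinearly upsampled to the padded input size.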
""" all_cls_scores, all_mask_preds = self(feats, img_metas) mask_cls_results = all_cls_scores[-1] mask_pred_results = all_mask_preds[-1] # upsample masks img_shape = img_metas[0]['batch_input_shape'] mask_pred_results = F.interpolate( mask_pred_results, size=(img_shape[0], img_shape[1]), mode='bilinear', align_corners=False) return mask_cls_results, mask_pred_results ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/nasfcos_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import copy import torch.nn as nn from mmcv.cnn import ConvModule, Scale from mmdet.models.dense_heads.fcos_head import FCOSHead from ..builder import HEADS @HEADS.register_module() class NASFCOSHead(FCOSHead): """Anchor-free head used in `NASFCOS `_. It is quite similar with FCOS head, except for the searched structure of classification branch and bbox regression branch, where a structure of "dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead. """ def __init__(self, *args, init_cfg=None, **kwargs): if init_cfg is None: init_cfg = [ dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']), dict( type='Normal', std=0.01, override=[ dict(name='conv_reg'), dict(name='conv_centerness'), dict( name='conv_cls', type='Normal', std=0.01, bias_prob=0.01) ]), ] super(NASFCOSHead, self).__init__(*args, init_cfg=init_cfg, **kwargs) def _init_layers(self): """Initialize layers of the head.""" dconv3x3_config = dict( type='DCNv2', kernel_size=3, use_bias=True, deform_groups=2, padding=1) conv3x3_config = dict(type='Conv', kernel_size=3, padding=1) conv1x1_config = dict(type='Conv', kernel_size=1) self.arch_config = [ dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config ] self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i, op_ in enumerate(self.arch_config): op = copy.deepcopy(op_) chn = self.in_channels if i == 0 else self.feat_channels assert isinstance(op, dict) use_bias = op.pop('use_bias', False) padding = op.pop('padding', 0) kernel_size = op.pop('kernel_size') module = ConvModule( chn, self.feat_channels, kernel_size, stride=1, padding=padding, norm_cfg=self.norm_cfg, bias=use_bias, conv_cfg=op) self.cls_convs.append(copy.deepcopy(module)) self.reg_convs.append(copy.deepcopy(module)) self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1) self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/paa_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from mmcv.runner import force_fp32 from mmdet.core import multi_apply, multiclass_nms from mmdet.core.bbox.iou_calculators import bbox_overlaps from mmdet.models import HEADS from mmdet.models.dense_heads import ATSSHead EPS = 1e-12 try: import sklearn.mixture as skm except ImportError: skm = None def levels_to_images(mlvl_tensor): """Concat multi-level feature maps by image. [feature_level0, feature_level1...] -> [feature_image0, feature_image1...] Convert the shape of each element in mlvl_tensor from (N, C, H, W) to (N, H*W , C), then split the element to N elements with shape (H*W, C), and concat elements in same image of all level along first dimension. 
Args: mlvl_tensor (list[torch.Tensor]): list of Tensor which collect from corresponding level. Each element is of shape (N, C, H, W) Returns: list[torch.Tensor]: A list that contains N tensors and each tensor is of shape (num_elements, C) """ batch_size = mlvl_tensor[0].size(0) batch_list = [[] for _ in range(batch_size)] channels = mlvl_tensor[0].size(1) for t in mlvl_tensor: t = t.permute(0, 2, 3, 1) t = t.view(batch_size, -1, channels).contiguous() for img in range(batch_size): batch_list[img].append(t[img]) return [torch.cat(item, 0) for item in batch_list] @HEADS.register_module() class PAAHead(ATSSHead): """Head of PAAAssignment: Probabilistic Anchor Assignment with IoU Prediction for Object Detection. Code is modified from the `official github repo `_. More details can be found in the `paper `_ . Args: topk (int): Select topk samples with smallest loss in each level. score_voting (bool): Whether to use score voting in post-process. covariance_type : String describing the type of covariance parameters to be used in :class:`sklearn.mixture.GaussianMixture`. It must be one of: - 'full': each component has its own general covariance matrix - 'tied': all components share the same general covariance matrix - 'diag': each component has its own diagonal covariance matrix - 'spherical': each component has its own single variance Default: 'diag'. From 'full' to 'spherical', the gmm fitting process is faster yet the performance could be influenced. For most cases, 'diag' should be a good choice. """ def __init__(self, *args, topk=9, score_voting=True, covariance_type='diag', **kwargs): # topk used in paa reassign process self.topk = topk self.with_score_voting = score_voting self.covariance_type = covariance_type super(PAAHead, self).__init__(*args, **kwargs) @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds')) def loss(self, cls_scores, bbox_preds, iou_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) iou_preds (list[Tensor]): iou_preds for each scale level with shape (N, num_anchors * 1, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): Specify which bounding boxes can be ignored when are computing the loss. Returns: dict[str, Tensor]: A dictionary of loss gmm_assignment. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, ) (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds, pos_gt_index) = cls_reg_targets cls_scores = levels_to_images(cls_scores) cls_scores = [ item.reshape(-1, self.cls_out_channels) for item in cls_scores ] bbox_preds = levels_to_images(bbox_preds) bbox_preds = [item.reshape(-1, 4) for item in bbox_preds] iou_preds = levels_to_images(iou_preds) iou_preds = [item.reshape(-1, 1) for item in iou_preds] pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list, cls_scores, bbox_preds, labels, labels_weight, bboxes_target, bboxes_weight, pos_inds) with torch.no_grad(): reassign_labels, reassign_label_weight, \ reassign_bbox_weights, num_pos = multi_apply( self.paa_reassign, pos_losses_list, labels, labels_weight, bboxes_weight, pos_inds, pos_gt_index, anchor_list) num_pos = sum(num_pos) # convert all tensor list to a flatten tensor cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1)) bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1)) iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1)) labels = torch.cat(reassign_labels, 0).view(-1) flatten_anchors = torch.cat( [torch.cat(item, 0) for item in anchor_list]) labels_weight = torch.cat(reassign_label_weight, 0).view(-1) bboxes_target = torch.cat(bboxes_target, 0).view(-1, bboxes_target[0].size(-1)) pos_inds_flatten = ((labels >= 0) & (labels < self.num_classes)).nonzero().reshape(-1) losses_cls = self.loss_cls( cls_scores, labels, labels_weight, avg_factor=max(num_pos, len(img_metas))) # avoid num_pos=0 if num_pos: pos_bbox_pred = self.bbox_coder.decode( flatten_anchors[pos_inds_flatten], bbox_preds[pos_inds_flatten]) pos_bbox_target = bboxes_target[pos_inds_flatten] iou_target = bbox_overlaps( pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True) losses_iou = self.loss_centerness( iou_preds[pos_inds_flatten], iou_target.unsqueeze(-1), avg_factor=num_pos) losses_bbox = self.loss_bbox( pos_bbox_pred, pos_bbox_target, iou_target.clamp(min=EPS), avg_factor=iou_target.sum()) else: losses_iou = iou_preds.sum() * 0 losses_bbox = bbox_preds.sum() * 0 return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou) def get_pos_loss(self, anchors, cls_score, bbox_pred, label, label_weight, bbox_target, bbox_weight, pos_inds): """Calculate loss of all potential positive samples obtained from first match process. Args: anchors (list[Tensor]): Anchors of each scale. cls_score (Tensor): Box scores of single image with shape (num_anchors, num_classes) bbox_pred (Tensor): Box energies / deltas of single image with shape (num_anchors, 4) label (Tensor): classification target of each anchor with shape (num_anchors,) label_weight (Tensor): Classification loss weight of each anchor with shape (num_anchors). bbox_target (dict): Regression target of each anchor with shape (num_anchors, 4). bbox_weight (Tensor): Bbox weight of each anchor with shape (num_anchors, 4). pos_inds (Tensor): Index of all positive samples got from first assign process. 
Returns: Tensor: Losses of all positive samples in single image. """ if not len(pos_inds): return cls_score.new([]), anchors_all_level = torch.cat(anchors, 0) pos_scores = cls_score[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_label = label[pos_inds] pos_label_weight = label_weight[pos_inds] pos_bbox_target = bbox_target[pos_inds] pos_bbox_weight = bbox_weight[pos_inds] pos_anchors = anchors_all_level[pos_inds] pos_bbox_pred = self.bbox_coder.decode(pos_anchors, pos_bbox_pred) # to keep loss dimension loss_cls = self.loss_cls( pos_scores, pos_label, pos_label_weight, avg_factor=1.0, reduction_override='none') loss_bbox = self.loss_bbox( pos_bbox_pred, pos_bbox_target, pos_bbox_weight, avg_factor=1.0, # keep same loss weight before reassign reduction_override='none') loss_cls = loss_cls.sum(-1) pos_loss = loss_bbox + loss_cls return pos_loss, def paa_reassign(self, pos_losses, label, label_weight, bbox_weight, pos_inds, pos_gt_inds, anchors): """Fit loss to GMM distribution and separate positive, ignore, negative samples again with GMM model. Args: pos_losses (Tensor): Losses of all positive samples in single image. label (Tensor): classification target of each anchor with shape (num_anchors,) label_weight (Tensor): Classification loss weight of each anchor with shape (num_anchors). bbox_weight (Tensor): Bbox weight of each anchor with shape (num_anchors, 4). pos_inds (Tensor): Index of all positive samples got from first assign process. pos_gt_inds (Tensor): Gt_index of all positive samples got from first assign process. anchors (list[Tensor]): Anchors of each scale. Returns: tuple: Usually returns a tuple containing learning targets. - label (Tensor): classification target of each anchor after paa assign, with shape (num_anchors,) - label_weight (Tensor): Classification loss weight of each anchor after paa assign, with shape (num_anchors). - bbox_weight (Tensor): Bbox weight of each anchor with shape (num_anchors, 4). - num_pos (int): The number of positive samples after paa assign. 
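        Example (an illustrative sketch of the GMM step, not in the
        original docstring; assumes scikit-learn is installed and uses
        made-up loss values)::

            >>> import numpy as np
            >>> import sklearn.mixture as skm
            >>> losses = np.array([[0.1], [0.15], [0.2], [2.0], [2.4]])
            >>> gmm = skm.GaussianMixture(
            ...     2, means_init=[[0.1], [2.4]], covariance_type='diag')
            >>> assignment = gmm.fit(losses).predict(losses)

        Component 0 (low loss) plays the role of the positive samples and
        component 1 the negatives, mirroring the per-GT loop below.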
""" if not len(pos_inds): return label, label_weight, bbox_weight, 0 label = label.clone() label_weight = label_weight.clone() bbox_weight = bbox_weight.clone() num_gt = pos_gt_inds.max() + 1 num_level = len(anchors) num_anchors_each_level = [item.size(0) for item in anchors] num_anchors_each_level.insert(0, 0) inds_level_interval = np.cumsum(num_anchors_each_level) pos_level_mask = [] for i in range(num_level): mask = (pos_inds >= inds_level_interval[i]) & ( pos_inds < inds_level_interval[i + 1]) pos_level_mask.append(mask) pos_inds_after_paa = [label.new_tensor([])] ignore_inds_after_paa = [label.new_tensor([])] for gt_ind in range(num_gt): pos_inds_gmm = [] pos_loss_gmm = [] gt_mask = pos_gt_inds == gt_ind for level in range(num_level): level_mask = pos_level_mask[level] level_gt_mask = level_mask & gt_mask value, topk_inds = pos_losses[level_gt_mask].topk( min(level_gt_mask.sum(), self.topk), largest=False) pos_inds_gmm.append(pos_inds[level_gt_mask][topk_inds]) pos_loss_gmm.append(value) pos_inds_gmm = torch.cat(pos_inds_gmm) pos_loss_gmm = torch.cat(pos_loss_gmm) # fix gmm need at least two sample if len(pos_inds_gmm) < 2: continue device = pos_inds_gmm.device pos_loss_gmm, sort_inds = pos_loss_gmm.sort() pos_inds_gmm = pos_inds_gmm[sort_inds] pos_loss_gmm = pos_loss_gmm.view(-1, 1).cpu().numpy() min_loss, max_loss = pos_loss_gmm.min(), pos_loss_gmm.max() means_init = np.array([min_loss, max_loss]).reshape(2, 1) weights_init = np.array([0.5, 0.5]) precisions_init = np.array([1.0, 1.0]).reshape(2, 1, 1) # full if self.covariance_type == 'spherical': precisions_init = precisions_init.reshape(2) elif self.covariance_type == 'diag': precisions_init = precisions_init.reshape(2, 1) elif self.covariance_type == 'tied': precisions_init = np.array([[1.0]]) if skm is None: raise ImportError('Please run "pip install sklearn" ' 'to install sklearn first.') gmm = skm.GaussianMixture( 2, weights_init=weights_init, means_init=means_init, precisions_init=precisions_init, covariance_type=self.covariance_type) gmm.fit(pos_loss_gmm) gmm_assignment = gmm.predict(pos_loss_gmm) scores = gmm.score_samples(pos_loss_gmm) gmm_assignment = torch.from_numpy(gmm_assignment).to(device) scores = torch.from_numpy(scores).to(device) pos_inds_temp, ignore_inds_temp = self.gmm_separation_scheme( gmm_assignment, scores, pos_inds_gmm) pos_inds_after_paa.append(pos_inds_temp) ignore_inds_after_paa.append(ignore_inds_temp) pos_inds_after_paa = torch.cat(pos_inds_after_paa) ignore_inds_after_paa = torch.cat(ignore_inds_after_paa) reassign_mask = (pos_inds.unsqueeze(1) != pos_inds_after_paa).all(1) reassign_ids = pos_inds[reassign_mask] label[reassign_ids] = self.num_classes label_weight[ignore_inds_after_paa] = 0 bbox_weight[reassign_ids] = 0 num_pos = len(pos_inds_after_paa) return label, label_weight, bbox_weight, num_pos def gmm_separation_scheme(self, gmm_assignment, scores, pos_inds_gmm): """A general separation scheme for gmm model. It separates a GMM distribution of candidate samples into three parts, 0 1 and uncertain areas, and you can implement other separation schemes by rewriting this function. Args: gmm_assignment (Tensor): The prediction of GMM which is of shape (num_samples,). The 0/1 value indicates the distribution that each sample comes from. scores (Tensor): The probability of sample coming from the fit GMM distribution. The tensor is of shape (num_samples,). pos_inds_gmm (Tensor): All the indexes of samples which are used to fit GMM model. 
The tensor is of shape (num_samples,) Returns: tuple[Tensor]: The indices of positive and ignored samples. - pos_inds_temp (Tensor): Indices of positive samples. - ignore_inds_temp (Tensor): Indices of ignore samples. """ # The implementation is (c) in Fig.3 in origin paper instead of (b). # You can refer to issues such as # https://github.com/kkhoot/PAA/issues/8 and # https://github.com/kkhoot/PAA/issues/9. fgs = gmm_assignment == 0 pos_inds_temp = fgs.new_tensor([], dtype=torch.long) ignore_inds_temp = fgs.new_tensor([], dtype=torch.long) if fgs.nonzero().numel(): _, pos_thr_ind = scores[fgs].topk(1) pos_inds_temp = pos_inds_gmm[fgs][:pos_thr_ind + 1] ignore_inds_temp = pos_inds_gmm.new_tensor([]) return pos_inds_temp, ignore_inds_temp def get_targets( self, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True, ): """Get targets for PAA head. This method is almost the same as `AnchorHead.get_targets()`. We direct return the results from _get_targets_single instead map it to levels by images_to_levels function. Args: anchor_list (list[list[Tensor]]): Multi level anchors of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, 4). valid_flag_list (list[list[Tensor]]): Multi level valid flags of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, ) gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be ignored. gt_labels_list (list[Tensor]): Ground truth labels of each box. label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: Usually returns a tuple containing learning targets. - labels (list[Tensor]): Labels of all anchors, each with shape (num_anchors,). - label_weights (list[Tensor]): Label weights of all anchor. each with shape (num_anchors,). - bbox_targets (list[Tensor]): BBox targets of all anchors. each with shape (num_anchors, 4). - bbox_weights (list[Tensor]): BBox weights of all anchors. each with shape (num_anchors, 4). - pos_inds (list[Tensor]): Contains all index of positive sample in all anchor. - gt_inds (list[Tensor]): Contains all gt_index of positive sample in all anchor. """ num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs concat_anchor_list = [] concat_valid_flag_list = [] for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) concat_anchor_list.append(torch.cat(anchor_list[i])) concat_valid_flag_list.append(torch.cat(valid_flag_list[i])) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] results = multi_apply( self._get_targets_single, concat_anchor_list, concat_valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs) (labels, label_weights, bbox_targets, bbox_weights, valid_pos_inds, valid_neg_inds, sampling_result) = results # Due to valid flag of anchors, we have to calculate the real pos_inds # in origin anchor set. 
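        # --- Illustrative sketch (editor's addition, not original code) ---
        # Background is encoded as `num_classes`, so positives are recovered
        # by a simple range test, e.g. with num_classes = 3:
        #
        #   >>> import torch
        #   >>> labels = torch.tensor([3, 0, 2, 3, 1])   # 3 == background
        #   >>> ((0 <= labels) & (labels < 3)).nonzero().view(-1)
        #   tensor([1, 2, 4])
        # --- end sketch ---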
pos_inds = [] for i, single_labels in enumerate(labels): pos_mask = (0 <= single_labels) & ( single_labels < self.num_classes) pos_inds.append(pos_mask.nonzero().view(-1)) gt_inds = [item.pos_assigned_gt_inds for item in sampling_result] return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, gt_inds) def _get_targets_single(self, flat_anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True): """Compute regression and classification targets for anchors in a single image. This method is same as `AnchorHead._get_targets_single()`. """ assert unmap_outputs, 'We must map outputs back to the original' \ 'set of anchors in PAAhead' return super(ATSSHead, self)._get_targets_single( flat_anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True) @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def get_bboxes(self, cls_scores, bbox_preds, score_factors=None, img_metas=None, cfg=None, rescale=False, with_nms=True, **kwargs): assert with_nms, 'PAA only supports "with_nms=True" now and it ' \ 'means PAAHead does not support ' \ 'test-time augmentation' return super(ATSSHead, self).get_bboxes(cls_scores, bbox_preds, score_factors, img_metas, cfg, rescale, with_nms, **kwargs) def _get_bboxes_single(self, cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors, img_meta, cfg, rescale=False, with_nms=True, **kwargs): """Transform outputs of a single image into bbox predictions. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factors from all scale levels of a single image, each item has shape (num_priors * 1, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid, has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually with_nms is False is used for aug test. If with_nms is True, then return the following format - det_bboxes (Tensor): Predicted bboxes with shape \ [num_bboxes, 5], where the first 4 columns are bounding \ box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ column are scores between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding \ box with shape [num_bboxes]. 
""" cfg = self.test_cfg if cfg is None else cfg img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_score_factors = [] for level_idx, (cls_score, bbox_pred, score_factor, priors) in \ enumerate(zip(cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] scores = cls_score.permute(1, 2, 0).reshape( -1, self.cls_out_channels).sigmoid() bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid() if 0 < nms_pre < scores.shape[0]: max_scores, _ = (scores * score_factor[:, None]).sqrt().max(dim=1) _, topk_inds = max_scores.topk(nms_pre) priors = priors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] score_factor = score_factor[topk_inds] bboxes = self.bbox_coder.decode( priors, bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_score_factors.append(score_factor) return self._bbox_post_process(mlvl_scores, mlvl_bboxes, img_meta['scale_factor'], cfg, rescale, with_nms, mlvl_score_factors, **kwargs) def _bbox_post_process(self, mlvl_scores, mlvl_bboxes, scale_factor, cfg, rescale=False, with_nms=True, mlvl_score_factors=None, **kwargs): """bbox post-processing method. The boxes would be rescaled to the original image scale and do the nms operation. Usually with_nms is False is used for aug test. Args: mlvl_scores (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_bboxes, num_class). mlvl_bboxes (list[Tensor]): Decoded bboxes from all scale levels of a single image, each item has shape (num_bboxes, 4). scale_factor (ndarray, optional): Scale factor of the image arange as (w_scale, h_scale, w_scale, h_scale). cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. mlvl_score_factors (list[Tensor], optional): Score factor from all scale levels of a single image, each item has shape (num_bboxes, ). Default: None. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually with_nms is False is used for aug test. If with_nms is True, then return the following format - det_bboxes (Tensor): Predicted bboxes with shape \ [num_bboxes, 5], where the first 4 columns are bounding \ box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ column are scores between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding \ box with shape [num_bboxes]. 
""" mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) # Add a dummy background class to the backend when using sigmoid # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) mlvl_iou_preds = torch.cat(mlvl_score_factors) mlvl_nms_scores = (mlvl_scores * mlvl_iou_preds[:, None]).sqrt() det_bboxes, det_labels = multiclass_nms( mlvl_bboxes, mlvl_nms_scores, cfg.score_thr, cfg.nms, cfg.max_per_img, score_factors=None) if self.with_score_voting and len(det_bboxes) > 0: det_bboxes, det_labels = self.score_voting(det_bboxes, det_labels, mlvl_bboxes, mlvl_nms_scores, cfg.score_thr) return det_bboxes, det_labels def score_voting(self, det_bboxes, det_labels, mlvl_bboxes, mlvl_nms_scores, score_thr): """Implementation of score voting method works on each remaining boxes after NMS procedure. Args: det_bboxes (Tensor): Remaining boxes after NMS procedure, with shape (k, 5), each dimension means (x1, y1, x2, y2, score). det_labels (Tensor): The label of remaining boxes, with shape (k, 1),Labels are 0-based. mlvl_bboxes (Tensor): All boxes before the NMS procedure, with shape (num_anchors,4). mlvl_nms_scores (Tensor): The scores of all boxes which is used in the NMS procedure, with shape (num_anchors, num_class) score_thr (float): The score threshold of bboxes. Returns: tuple: Usually returns a tuple containing voting results. - det_bboxes_voted (Tensor): Remaining boxes after score voting procedure, with shape (k, 5), each dimension means (x1, y1, x2, y2, score). - det_labels_voted (Tensor): Label of remaining bboxes after voting, with shape (num_anchors,). """ candidate_mask = mlvl_nms_scores > score_thr candidate_mask_nonzeros = candidate_mask.nonzero(as_tuple=False) candidate_inds = candidate_mask_nonzeros[:, 0] candidate_labels = candidate_mask_nonzeros[:, 1] candidate_bboxes = mlvl_bboxes[candidate_inds] candidate_scores = mlvl_nms_scores[candidate_mask] det_bboxes_voted = [] det_labels_voted = [] for cls in range(self.cls_out_channels): candidate_cls_mask = candidate_labels == cls if not candidate_cls_mask.any(): continue candidate_cls_scores = candidate_scores[candidate_cls_mask] candidate_cls_bboxes = candidate_bboxes[candidate_cls_mask] det_cls_mask = det_labels == cls det_cls_bboxes = det_bboxes[det_cls_mask].view( -1, det_bboxes.size(-1)) det_candidate_ious = bbox_overlaps(det_cls_bboxes[:, :4], candidate_cls_bboxes) for det_ind in range(len(det_cls_bboxes)): single_det_ious = det_candidate_ious[det_ind] pos_ious_mask = single_det_ious > 0.01 pos_ious = single_det_ious[pos_ious_mask] pos_bboxes = candidate_cls_bboxes[pos_ious_mask] pos_scores = candidate_cls_scores[pos_ious_mask] pis = (torch.exp(-(1 - pos_ious)**2 / 0.025) * pos_scores)[:, None] voted_box = torch.sum( pis * pos_bboxes, dim=0) / torch.sum( pis, dim=0) voted_score = det_cls_bboxes[det_ind][-1:][None, :] det_bboxes_voted.append( torch.cat((voted_box[None, :], voted_score), dim=1)) det_labels_voted.append(cls) det_bboxes_voted = torch.cat(det_bboxes_voted, dim=0) det_labels_voted = det_labels.new_tensor(det_labels_voted) return det_bboxes_voted, det_labels_voted ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/pisa_retinanet_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
import torch from mmcv.runner import force_fp32 from mmdet.core import images_to_levels from ..builder import HEADS from ..losses import carl_loss, isr_p from .retina_head import RetinaHead @HEADS.register_module() class PISARetinaHead(RetinaHead): """PISA Retinanet Head. The head owns the same structure with Retinanet Head, but differs in two aspects: 1. Importance-based Sample Reweighting Positive (ISR-P) is applied to change the positive loss weights. 2. Classification-aware regression loss is adopted as a third loss. """ @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes of each image with shape (num_obj, 4). gt_labels (list[Tensor]): Ground truth labels of each image with shape (num_obj, 4). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image. Default: None. Returns: dict: Loss dict, comprise classification loss, regression loss and carl loss. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, return_sampling_results=True) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets num_total_samples = ( num_total_pos + num_total_neg if self.sampling else num_total_pos) # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor concat_anchor_list = [] for i in range(len(anchor_list)): concat_anchor_list.append(torch.cat(anchor_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) num_imgs = len(img_metas) flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels) for cls_score in cls_scores ] flatten_cls_scores = torch.cat( flatten_cls_scores, dim=1).reshape(-1, flatten_cls_scores[0].size(-1)) flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) for bbox_pred in bbox_preds ] flatten_bbox_preds = torch.cat( flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1)) flatten_labels = torch.cat(labels_list, dim=1).reshape(-1) flatten_label_weights = torch.cat( label_weights_list, dim=1).reshape(-1) flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4) flatten_bbox_targets = torch.cat( bbox_targets_list, dim=1).reshape(-1, 4) flatten_bbox_weights = torch.cat( bbox_weights_list, dim=1).reshape(-1, 4) # Apply ISR-P isr_cfg = self.train_cfg.get('isr', None) if isr_cfg is not None: all_targets = (flatten_labels, flatten_label_weights, flatten_bbox_targets, flatten_bbox_weights) with torch.no_grad(): all_targets = isr_p( 
flatten_cls_scores, flatten_bbox_preds, all_targets, flatten_anchors, sampling_results_list, bbox_coder=self.bbox_coder, loss_cls=self.loss_cls, num_class=self.num_classes, **self.train_cfg.isr) (flatten_labels, flatten_label_weights, flatten_bbox_targets, flatten_bbox_weights) = all_targets # For convenience we compute loss once instead separating by fpn level, # so that we don't need to separate the weights by level again. # The result should be the same losses_cls = self.loss_cls( flatten_cls_scores, flatten_labels, flatten_label_weights, avg_factor=num_total_samples) losses_bbox = self.loss_bbox( flatten_bbox_preds, flatten_bbox_targets, flatten_bbox_weights, avg_factor=num_total_samples) loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) # CARL Loss carl_cfg = self.train_cfg.get('carl', None) if carl_cfg is not None: loss_carl = carl_loss( flatten_cls_scores, flatten_labels, flatten_bbox_preds, flatten_bbox_targets, self.loss_bbox, **self.train_cfg.carl, avg_factor=num_total_pos, sigmoid=True, num_class=self.num_classes) loss_dict.update(loss_carl) return loss_dict ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/pisa_ssd_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.core import multi_apply from ..builder import HEADS from ..losses import CrossEntropyLoss, SmoothL1Loss, carl_loss, isr_p from .ssd_head import SSDHead # TODO: add loss evaluator for SSD @HEADS.register_module() class PISASSDHead(SSDHead): def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes of each image with shape (num_obj, 4). gt_labels (list[Tensor]): Ground truth labels of each image with shape (num_obj, 4). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image. Default: None. Returns: dict: Loss dict, comprise classification loss regression loss and carl loss. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=1, unmap_outputs=False, return_sampling_results=True) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets num_images = len(img_metas) all_cls_scores = torch.cat([ s.permute(0, 2, 3, 1).reshape( num_images, -1, self.cls_out_channels) for s in cls_scores ], 1) all_labels = torch.cat(labels_list, -1).view(num_images, -1) all_label_weights = torch.cat(label_weights_list, -1).view(num_images, -1) all_bbox_preds = torch.cat([ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) for b in bbox_preds ], -2) all_bbox_targets = torch.cat(bbox_targets_list, -2).view(num_images, -1, 4) all_bbox_weights = torch.cat(bbox_weights_list, -2).view(num_images, -1, 4) # concat all level anchors to a single tensor all_anchors = [] for i in range(num_images): all_anchors.append(torch.cat(anchor_list[i])) isr_cfg = self.train_cfg.get('isr', None) all_targets = (all_labels.view(-1), all_label_weights.view(-1), all_bbox_targets.view(-1, 4), all_bbox_weights.view(-1, 4)) # apply ISR-P if isr_cfg is not None: all_targets = isr_p( all_cls_scores.view(-1, all_cls_scores.size(-1)), all_bbox_preds.view(-1, 4), all_targets, torch.cat(all_anchors), sampling_results_list, loss_cls=CrossEntropyLoss(), bbox_coder=self.bbox_coder, **self.train_cfg.isr, num_class=self.num_classes) (new_labels, new_label_weights, new_bbox_targets, new_bbox_weights) = all_targets all_labels = new_labels.view(all_labels.shape) all_label_weights = new_label_weights.view(all_label_weights.shape) all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape) all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape) # add CARL loss carl_loss_cfg = self.train_cfg.get('carl', None) if carl_loss_cfg is not None: loss_carl = carl_loss( all_cls_scores.view(-1, all_cls_scores.size(-1)), all_targets[0], all_bbox_preds.view(-1, 4), all_targets[2], SmoothL1Loss(beta=1.), **self.train_cfg.carl, avg_factor=num_total_pos, num_class=self.num_classes) # check NaN and Inf assert torch.isfinite(all_cls_scores).all().item(), \ 'classification scores become infinite or NaN!' assert torch.isfinite(all_bbox_preds).all().item(), \ 'bbox predications become infinite or NaN!' losses_cls, losses_bbox = multi_apply( self.loss_single, all_cls_scores, all_bbox_preds, all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, num_total_samples=num_total_pos) loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) if carl_loss_cfg is not None: loss_dict.update(loss_carl) return loss_dict ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/reppoints_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
import numpy as np import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import DeformConv2d from mmdet.core import (build_assigner, build_sampler, images_to_levels, multi_apply, unmap) from mmdet.core.anchor.point_generator import MlvlPointGenerator from mmdet.core.utils import filter_scores_and_topk from ..builder import HEADS, build_loss from .anchor_free_head import AnchorFreeHead @HEADS.register_module() class RepPointsHead(AnchorFreeHead): """RepPoint head. Args: point_feat_channels (int): Number of channels of points features. gradient_mul (float): The multiplier to gradients from points refinement and recognition. point_strides (Iterable): points strides. point_base_scale (int): bbox scale for assigning labels. loss_cls (dict): Config of classification loss. loss_bbox_init (dict): Config of initial points loss. loss_bbox_refine (dict): Config of points loss in refinement. use_grid_points (bool): If we use bounding box representation, the reppoints is represented as grid points on the bounding box. center_init (bool): Whether to use center point assignment. transform_method (str): The methods to transform RepPoints to bbox. init_cfg (dict or list[dict], optional): Initialization config dict. """ # noqa: W605 def __init__(self, num_classes, in_channels, point_feat_channels=256, num_points=9, gradient_mul=0.1, point_strides=[8, 16, 32, 64, 128], point_base_scale=4, loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_init=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5), loss_bbox_refine=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), use_grid_points=False, center_init=True, transform_method='moment', moment_mul=0.01, init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='reppoints_cls_out', std=0.01, bias_prob=0.01)), **kwargs): self.num_points = num_points self.point_feat_channels = point_feat_channels self.use_grid_points = use_grid_points self.center_init = center_init # we use deform conv to extract points features self.dcn_kernel = int(np.sqrt(num_points)) self.dcn_pad = int((self.dcn_kernel - 1) / 2) assert self.dcn_kernel * self.dcn_kernel == num_points, \ 'The points number should be a square number.' assert self.dcn_kernel % 2 == 1, \ 'The points number should be an odd square number.' dcn_base = np.arange(-self.dcn_pad, self.dcn_pad + 1).astype(np.float64) dcn_base_y = np.repeat(dcn_base, self.dcn_kernel) dcn_base_x = np.tile(dcn_base, self.dcn_kernel) dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape( (-1)) self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1) super().__init__( num_classes, in_channels, loss_cls=loss_cls, init_cfg=init_cfg, **kwargs) self.gradient_mul = gradient_mul self.point_base_scale = point_base_scale self.point_strides = point_strides self.prior_generator = MlvlPointGenerator( self.point_strides, offset=0.) 
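        # --- Illustrative note (editor's addition, not original code) ---
        # The classifier width chosen below depends on the loss: sigmoid-based
        # losses (e.g. FocalLoss) predict one logit per foreground class,
        # while softmax-based losses add an explicit background channel:
        #
        #   >>> num_classes, use_sigmoid_cls = 80, True
        #   >>> num_classes if use_sigmoid_cls else num_classes + 1
        #   80
        # --- end note ---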
self.sampling = loss_cls['type'] not in ['FocalLoss'] if self.train_cfg: self.init_assigner = build_assigner(self.train_cfg.init.assigner) self.refine_assigner = build_assigner( self.train_cfg.refine.assigner) # use PseudoSampler when sampling is False if self.sampling and hasattr(self.train_cfg, 'sampler'): sampler_cfg = self.train_cfg.sampler else: sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.transform_method = transform_method if self.transform_method == 'moment': self.moment_transfer = nn.Parameter( data=torch.zeros(2), requires_grad=True) self.moment_mul = moment_mul self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) if self.use_sigmoid_cls: self.cls_out_channels = self.num_classes else: self.cls_out_channels = self.num_classes + 1 self.loss_bbox_init = build_loss(loss_bbox_init) self.loss_bbox_refine = build_loss(loss_bbox_refine) def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points self.reppoints_cls_conv = DeformConv2d(self.feat_channels, self.point_feat_channels, self.dcn_kernel, 1, self.dcn_pad) self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels, self.cls_out_channels, 1, 1, 0) self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels, self.point_feat_channels, 3, 1, 1) self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels, pts_out_dim, 1, 1, 0) self.reppoints_pts_refine_conv = DeformConv2d(self.feat_channels, self.point_feat_channels, self.dcn_kernel, 1, self.dcn_pad) self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels, pts_out_dim, 1, 1, 0) def points2bbox(self, pts, y_first=True): """Converting the points set into bounding box. :param pts: the input points sets (fields), each points set (fields) is represented as 2n scalar. :param y_first: if y_first=True, the point set is represented as [y1, x1, y2, x2 ... yn, xn], otherwise the point set is represented as [x1, y1, x2, y2 ... xn, yn]. :return: each points set is converting to a bbox [x1, y1, x2, y2]. """ pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:]) pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1, ...] pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0, ...] if self.transform_method == 'minmax': bbox_left = pts_x.min(dim=1, keepdim=True)[0] bbox_right = pts_x.max(dim=1, keepdim=True)[0] bbox_up = pts_y.min(dim=1, keepdim=True)[0] bbox_bottom = pts_y.max(dim=1, keepdim=True)[0] bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom], dim=1) elif self.transform_method == 'partial_minmax': pts_y = pts_y[:, :4, ...] pts_x = pts_x[:, :4, ...] 
bbox_left = pts_x.min(dim=1, keepdim=True)[0] bbox_right = pts_x.max(dim=1, keepdim=True)[0] bbox_up = pts_y.min(dim=1, keepdim=True)[0] bbox_bottom = pts_y.max(dim=1, keepdim=True)[0] bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom], dim=1) elif self.transform_method == 'moment': pts_y_mean = pts_y.mean(dim=1, keepdim=True) pts_x_mean = pts_x.mean(dim=1, keepdim=True) pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True) pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True) moment_transfer = (self.moment_transfer * self.moment_mul) + ( self.moment_transfer.detach() * (1 - self.moment_mul)) moment_width_transfer = moment_transfer[0] moment_height_transfer = moment_transfer[1] half_width = pts_x_std * torch.exp(moment_width_transfer) half_height = pts_y_std * torch.exp(moment_height_transfer) bbox = torch.cat([ pts_x_mean - half_width, pts_y_mean - half_height, pts_x_mean + half_width, pts_y_mean + half_height ], dim=1) else: raise NotImplementedError return bbox def gen_grid_from_reg(self, reg, previous_boxes): """Based on the previous bboxes and regression values, compute the regressed bboxes and generate the grids on the bboxes. :param reg: the regression value to previous bboxes. :param previous_boxes: previous bboxes. :return: generated grids on the regressed bboxes. """ b, _, h, w = reg.shape bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2. bwh = (previous_boxes[:, 2:, ...] - previous_boxes[:, :2, ...]).clamp(min=1e-6) grid_topleft = bxy + bwh * reg[:, :2, ...] - 0.5 * bwh * torch.exp( reg[:, 2:, ...]) grid_wh = bwh * torch.exp(reg[:, 2:, ...]) grid_left = grid_topleft[:, [0], ...] grid_top = grid_topleft[:, [1], ...] grid_width = grid_wh[:, [0], ...] grid_height = grid_wh[:, [1], ...] interval = torch.linspace(0., 1., self.dcn_kernel).view( 1, self.dcn_kernel, 1, 1).type_as(reg) grid_x = grid_left + grid_width * interval grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1) grid_x = grid_x.view(b, -1, h, w) grid_y = grid_top + grid_height * interval grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1) grid_y = grid_y.view(b, -1, h, w) grid_yx = torch.stack([grid_y, grid_x], dim=2) grid_yx = grid_yx.view(b, -1, h, w) regressed_bbox = torch.cat([ grid_left, grid_top, grid_left + grid_width, grid_top + grid_height ], 1) return grid_yx, regressed_bbox def forward(self, feats): return multi_apply(self.forward_single, feats) def forward_single(self, x): """Forward feature map of a single FPN level.""" dcn_base_offset = self.dcn_base_offset.type_as(x) # If we use center_init, the initial reppoints are from center points. # If we use bounding bbox representation, the initial reppoints are # from a regular grid placed on a pre-defined bbox.
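        # --- Illustrative sketch (editor's addition, not original code) ---
        # The `pts_out_init_grad_mul` line below uses the standard detach
        # trick: the forward value is unchanged, but only `gradient_mul` of
        # the gradient flows back into the initial points:
        #
        #   >>> import torch
        #   >>> x = torch.tensor([2.0], requires_grad=True)
        #   >>> y = 0.9 * x.detach() + 0.1 * x   # gradient_mul = 0.1
        #   >>> y.backward()
        #   >>> x.grad
        #   tensor([0.1000])
        # --- end sketch ---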
if self.use_grid_points or not self.center_init: scale = self.point_base_scale / 2 points_init = dcn_base_offset / dcn_base_offset.max() * scale bbox_init = x.new_tensor([-scale, -scale, scale, scale]).view(1, 4, 1, 1) else: points_init = 0 cls_feat = x pts_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: pts_feat = reg_conv(pts_feat) # initialize reppoints pts_out_init = self.reppoints_pts_init_out( self.relu(self.reppoints_pts_init_conv(pts_feat))) if self.use_grid_points: pts_out_init, bbox_out_init = self.gen_grid_from_reg( pts_out_init, bbox_init.detach()) else: pts_out_init = pts_out_init + points_init # refine and classify reppoints pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach( ) + self.gradient_mul * pts_out_init dcn_offset = pts_out_init_grad_mul - dcn_base_offset cls_out = self.reppoints_cls_out( self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset))) pts_out_refine = self.reppoints_pts_refine_out( self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset))) if self.use_grid_points: pts_out_refine, bbox_out_refine = self.gen_grid_from_reg( pts_out_refine, bbox_out_init.detach()) else: pts_out_refine = pts_out_refine + pts_out_init.detach() if self.training: return cls_out, pts_out_init, pts_out_refine else: return cls_out, self.points2bbox(pts_out_refine) def get_points(self, featmap_sizes, img_metas, device): """Get points according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. img_metas (list[dict]): Image meta info. Returns: tuple: points of each image, valid flags of each image """ num_imgs = len(img_metas) # since feature map sizes of all images are the same, we only compute # points center for one time multi_level_points = self.prior_generator.grid_priors( featmap_sizes, device=device, with_stride=True) points_list = [[point.clone() for point in multi_level_points] for _ in range(num_imgs)] # for each image, we compute valid flags of multi level grids valid_flag_list = [] for img_id, img_meta in enumerate(img_metas): multi_level_flags = self.prior_generator.valid_flags( featmap_sizes, img_meta['pad_shape']) valid_flag_list.append(multi_level_flags) return points_list, valid_flag_list def centers_to_bboxes(self, point_list): """Get bboxes according to center points. Only used in :class:`MaxIoUAssigner`. 
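        Example (an illustrative addition, not in the original docstring):
        with ``point_base_scale=4``, a point (x, y) on the stride-8 level
        gets the square box::

            (x - 16, y - 16, x + 16, y + 16)   # scale = 4 * 8 * 0.5 = 16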
""" bbox_list = [] for i_img, point in enumerate(point_list): bbox = [] for i_lvl in range(len(self.point_strides)): scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5 bbox_shift = torch.Tensor([-scale, -scale, scale, scale]).view(1, 4).type_as(point[0]) bbox_center = torch.cat( [point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1) bbox.append(bbox_center + bbox_shift) bbox_list.append(bbox) return bbox_list def offset_to_pts(self, center_list, pred_list): """Change from point offset to point coordinate.""" pts_list = [] for i_lvl in range(len(self.point_strides)): pts_lvl = [] for i_img in range(len(center_list)): pts_center = center_list[i_img][i_lvl][:, :2].repeat( 1, self.num_points) pts_shift = pred_list[i_lvl][i_img] yx_pts_shift = pts_shift.permute(1, 2, 0).view( -1, 2 * self.num_points) y_pts_shift = yx_pts_shift[..., 0::2] x_pts_shift = yx_pts_shift[..., 1::2] xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1) xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1) pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center pts_lvl.append(pts) pts_lvl = torch.stack(pts_lvl, 0) pts_list.append(pts_lvl) return pts_list def _point_target_single(self, flat_proposals, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, stage='init', unmap_outputs=True): inside_flags = valid_flags if not inside_flags.any(): return (None, ) * 7 # assign gt and sample proposals proposals = flat_proposals[inside_flags, :] if stage == 'init': assigner = self.init_assigner pos_weight = self.train_cfg.init.pos_weight else: assigner = self.refine_assigner pos_weight = self.train_cfg.refine.pos_weight assign_result = assigner.assign(proposals, gt_bboxes, gt_bboxes_ignore, None if self.sampling else gt_labels) sampling_result = self.sampler.sample(assign_result, proposals, gt_bboxes) num_valid_proposals = proposals.shape[0] bbox_gt = proposals.new_zeros([num_valid_proposals, 4]) pos_proposals = torch.zeros_like(proposals) proposals_weights = proposals.new_zeros([num_valid_proposals, 4]) labels = proposals.new_full((num_valid_proposals, ), self.num_classes, dtype=torch.long) label_weights = proposals.new_zeros( num_valid_proposals, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: pos_gt_bboxes = sampling_result.pos_gt_bboxes bbox_gt[pos_inds, :] = pos_gt_bboxes pos_proposals[pos_inds, :] = proposals[pos_inds, :] proposals_weights[pos_inds, :] = 1.0 if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of proposals if unmap_outputs: num_total_proposals = flat_proposals.size(0) labels = unmap(labels, num_total_proposals, inside_flags) label_weights = unmap(label_weights, num_total_proposals, inside_flags) bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags) pos_proposals = unmap(pos_proposals, num_total_proposals, inside_flags) proposals_weights = unmap(proposals_weights, num_total_proposals, inside_flags) return (labels, label_weights, bbox_gt, pos_proposals, proposals_weights, pos_inds, neg_inds) def get_targets(self, proposals_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, stage='init', label_channels=1, unmap_outputs=True): """Compute corresponding GT box and 
classification targets for proposals. Args: proposals_list (list[list]): Multi level points/bboxes of each image. valid_flag_list (list[list]): Multi level valid flags of each image. gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be ignored. gt_labels_list (list[Tensor]): Ground truth labels of each box. stage (str): `init` or `refine`. Generate target for init stage or refine stage. label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: - labels_list (list[Tensor]): Labels of each level. - label_weights_list (list[Tensor]): Label weights of each level. # noqa: E501 - bbox_gt_list (list[Tensor]): Ground truth bbox of each level. - proposal_list (list[Tensor]): Proposals(points/bboxes) of each level. # noqa: E501 - proposal_weights_list (list[Tensor]): Proposal weights of each level. # noqa: E501 - num_total_pos (int): Number of positive samples in all images. # noqa: E501 - num_total_neg (int): Number of negative samples in all images. # noqa: E501 """ assert stage in ['init', 'refine'] num_imgs = len(img_metas) assert len(proposals_list) == len(valid_flag_list) == num_imgs # points number of multi levels num_level_proposals = [points.size(0) for points in proposals_list[0]] # concat all level points and flags to a single tensor for i in range(num_imgs): assert len(proposals_list[i]) == len(valid_flag_list[i]) proposals_list[i] = torch.cat(proposals_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_labels, all_label_weights, all_bbox_gt, all_proposals, all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply( self._point_target_single, proposals_list, valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, stage=stage, unmap_outputs=unmap_outputs) # no valid points if any([labels is None for labels in all_labels]): return None # sampled points of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) labels_list = images_to_levels(all_labels, num_level_proposals) label_weights_list = images_to_levels(all_label_weights, num_level_proposals) bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals) proposals_list = images_to_levels(all_proposals, num_level_proposals) proposal_weights_list = images_to_levels(all_proposal_weights, num_level_proposals) return (labels_list, label_weights_list, bbox_gt_list, proposals_list, proposal_weights_list, num_total_pos, num_total_neg) def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels, label_weights, bbox_gt_init, bbox_weights_init, bbox_gt_refine, bbox_weights_refine, stride, num_total_samples_init, num_total_samples_refine): # classification loss labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) cls_score = cls_score.contiguous() loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=num_total_samples_refine) # points loss bbox_gt_init = bbox_gt_init.reshape(-1, 4) bbox_weights_init = bbox_weights_init.reshape(-1, 4) bbox_pred_init = self.points2bbox( pts_pred_init.reshape(-1, 2
* self.num_points), y_first=False) bbox_gt_refine = bbox_gt_refine.reshape(-1, 4) bbox_weights_refine = bbox_weights_refine.reshape(-1, 4) bbox_pred_refine = self.points2bbox( pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False) normalize_term = self.point_base_scale * stride loss_pts_init = self.loss_bbox_init( bbox_pred_init / normalize_term, bbox_gt_init / normalize_term, bbox_weights_init, avg_factor=num_total_samples_init) loss_pts_refine = self.loss_bbox_refine( bbox_pred_refine / normalize_term, bbox_gt_refine / normalize_term, bbox_weights_refine, avg_factor=num_total_samples_refine) return loss_cls, loss_pts_init, loss_pts_refine def loss(self, cls_scores, pts_preds_init, pts_preds_refine, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] device = cls_scores[0].device label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 # target for initial stage center_list, valid_flag_list = self.get_points(featmap_sizes, img_metas, device) pts_coordinate_preds_init = self.offset_to_pts(center_list, pts_preds_init) if self.train_cfg.init.assigner['type'] == 'PointAssigner': # Assign target for center list candidate_list = center_list else: # transform center list to bbox list and # assign target for bbox list bbox_list = self.centers_to_bboxes(center_list) candidate_list = bbox_list cls_reg_targets_init = self.get_targets( candidate_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, stage='init', label_channels=label_channels) (*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init, num_total_pos_init, num_total_neg_init) = cls_reg_targets_init num_total_samples_init = ( num_total_pos_init + num_total_neg_init if self.sampling else num_total_pos_init) # target for refinement stage center_list, valid_flag_list = self.get_points(featmap_sizes, img_metas, device) pts_coordinate_preds_refine = self.offset_to_pts( center_list, pts_preds_refine) bbox_list = [] for i_img, center in enumerate(center_list): bbox = [] for i_lvl in range(len(pts_preds_refine)): bbox_preds_init = self.points2bbox( pts_preds_init[i_lvl].detach()) bbox_shift = bbox_preds_init * self.point_strides[i_lvl] bbox_center = torch.cat( [center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1) bbox.append(bbox_center + bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4)) bbox_list.append(bbox) cls_reg_targets_refine = self.get_targets( bbox_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, stage='refine', label_channels=label_channels) (labels_list, label_weights_list, bbox_gt_list_refine, candidate_list_refine, bbox_weights_list_refine, num_total_pos_refine, num_total_neg_refine) = cls_reg_targets_refine num_total_samples_refine = ( num_total_pos_refine + num_total_neg_refine if self.sampling else num_total_pos_refine) # compute loss losses_cls, losses_pts_init, losses_pts_refine = multi_apply( self.loss_single, cls_scores, pts_coordinate_preds_init, pts_coordinate_preds_refine, labels_list, label_weights_list, bbox_gt_list_init, bbox_weights_list_init, bbox_gt_list_refine, bbox_weights_list_refine, self.point_strides, num_total_samples_init=num_total_samples_init, num_total_samples_refine=num_total_samples_refine) loss_dict_all = { 'loss_cls': losses_cls, 'loss_pts_init': losses_pts_init, 'loss_pts_refine': losses_pts_refine } return loss_dict_all # Same as base_dense_head/_get_bboxes_single except 
self._bbox_decode def _get_bboxes_single(self, cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors, img_meta, cfg, rescale=False, with_nms=True, **kwargs): """Transform outputs of a single image into bbox predictions. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image. RepPoints head does not need this value. mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid, has shape (num_priors, 2). img_meta (dict): Image meta info. cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually with_nms is False is used for aug test. If with_nms is True, then return the following format - det_bboxes (Tensor): Predicted bboxes with shape \ [num_bboxes, 5], where the first 4 columns are bounding \ box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ column are scores between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding \ box with shape [num_bboxes]. """ cfg = self.test_cfg if cfg is None else cfg assert len(cls_score_list) == len(bbox_pred_list) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_labels = [] for level_idx, (cls_score, bbox_pred, priors) in enumerate( zip(cls_score_list, bbox_pred_list, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1)[:, :-1] # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. 
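# ---------------------------------------------------------------------
# Editor's note (illustrative, not part of the original file): the
# `filter_scores_and_topk` call below keeps only scores above
# `cfg.score_thr` and at most `nms_pre` of the survivors, also slicing
# the auxiliary dict (bbox_pred, priors) accordingly. A minimal sketch
# of that behaviour, assuming sigmoid scores of shape
# (num_priors, num_classes); the `_sketch` name is hypothetical.
import torch

def filter_scores_and_topk_sketch(scores, score_thr, topk):
    valid_mask = scores > score_thr               # (num_priors, num_classes)
    flat_scores = scores[valid_mask]              # row-major survivors
    prior_inds, labels = valid_mask.nonzero(as_tuple=True)
    num_topk = min(topk, flat_scores.numel()) if topk > 0 \
        else flat_scores.numel()                  # topk <= 0 keeps all
    flat_scores, idxs = flat_scores.sort(descending=True)
    keep = idxs[:num_topk]                        # indices into survivors
    return flat_scores[:num_topk], labels[keep], prior_inds[keep]
# ---------------------------------------------------------------------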
results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict(bbox_pred=bbox_pred, priors=priors)) scores, labels, _, filtered_results = results bbox_pred = filtered_results['bbox_pred'] priors = filtered_results['priors'] bboxes = self._bbox_decode(priors, bbox_pred, self.point_strides[level_idx], img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_labels.append(labels) return self._bbox_post_process( mlvl_scores, mlvl_labels, mlvl_bboxes, img_meta['scale_factor'], cfg, rescale=rescale, with_nms=with_nms) def _bbox_decode(self, points, bbox_pred, stride, max_shape): bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1) bboxes = bbox_pred * stride + bbox_pos_center x1 = bboxes[:, 0].clamp(min=0, max=max_shape[1]) y1 = bboxes[:, 1].clamp(min=0, max=max_shape[0]) x2 = bboxes[:, 2].clamp(min=0, max=max_shape[1]) y2 = bboxes[:, 3].clamp(min=0, max=max_shape[0]) decoded_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) return decoded_bboxes ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/retina_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from ..builder import HEADS from .anchor_head import AnchorHead @HEADS.register_module() class RetinaHead(AnchorHead): r"""An anchor-based head used in `RetinaNet <https://arxiv.org/abs/1708.02002>`_. The head contains two subnetworks. The first classifies anchor boxes and the second regresses deltas for the anchors. Example: >>> import torch >>> self = RetinaHead(11, 7) >>> x = torch.rand(1, 7, 32, 32) >>> cls_score, bbox_pred = self.forward_single(x) >>> # Each anchor predicts a score for each class except background >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors >>> assert cls_per_anchor == (self.num_classes) >>> assert box_per_anchor == 4 """ def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=None, anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='retina_cls', std=0.01, bias_prob=0.01)), **kwargs): self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg super(RetinaHead, self).__init__( num_classes, in_channels, anchor_generator=anchor_generator, init_cfg=init_cfg, **kwargs) def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.retina_cls = nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.retina_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) def forward_single(self, x): """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. Returns: tuple: cls_score (Tensor): Cls scores for a single scale level the channels number is num_anchors * num_classes.
bbox_pred (Tensor): Box energies / deltas for a single scale level, the channels number is num_anchors * 4. """ cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.retina_cls(cls_feat) bbox_pred = self.retina_reg(reg_feat) return cls_score, bbox_pred ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/retina_sepbn_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init from ..builder import HEADS from .anchor_head import AnchorHead @HEADS.register_module() class RetinaSepBNHead(AnchorHead): """RetinaHead with separate BN. In RetinaHead, conv/norm layers are shared across different FPN levels, while in RetinaSepBNHead, conv layers are shared across different FPN levels, but BN layers are separated. """ def __init__(self, num_classes, num_ins, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=None, init_cfg=None, **kwargs): assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.num_ins = num_ins super(RetinaSepBNHead, self).__init__( num_classes, in_channels, init_cfg=init_cfg, **kwargs) def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.num_ins): cls_convs = nn.ModuleList() reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.cls_convs.append(cls_convs) self.reg_convs.append(reg_convs) for i in range(self.stacked_convs): for j in range(1, self.num_ins): self.cls_convs[j][i].conv = self.cls_convs[0][i].conv self.reg_convs[j][i].conv = self.reg_convs[0][i].conv self.retina_cls = nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.retina_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) def init_weights(self): """Initialize weights of the head.""" super(RetinaSepBNHead, self).init_weights() for m in self.cls_convs[0]: normal_init(m.conv, std=0.01) for m in self.reg_convs[0]: normal_init(m.conv, std=0.01) bias_cls = bias_init_with_prob(0.01) normal_init(self.retina_cls, std=0.01, bias=bias_cls) normal_init(self.retina_reg, std=0.01) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_anchors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_anchors * 4.
""" cls_scores = [] bbox_preds = [] for i, x in enumerate(feats): cls_feat = feats[i] reg_feat = feats[i] for cls_conv in self.cls_convs[i]: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs[i]: reg_feat = reg_conv(reg_feat) cls_score = self.retina_cls(cls_feat) bbox_pred = self.retina_reg(reg_feat) cls_scores.append(cls_score) bbox_preds.append(bbox_pred) return cls_scores, bbox_preds ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/rpn_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import copy import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.ops import batched_nms from ..builder import HEADS from .anchor_head import AnchorHead @HEADS.register_module() class RPNHead(AnchorHead): """RPN head. Args: in_channels (int): Number of channels in the input feature map. init_cfg (dict or list[dict], optional): Initialization config dict. num_convs (int): Number of convolution layers in the head. Default 1. """ # noqa: W605 def __init__(self, in_channels, init_cfg=dict(type='Normal', layer='Conv2d', std=0.01), num_convs=1, **kwargs): self.num_convs = num_convs super(RPNHead, self).__init__( 1, in_channels, init_cfg=init_cfg, **kwargs) def _init_layers(self): """Initialize layers of the head.""" if self.num_convs > 1: rpn_convs = [] for i in range(self.num_convs): if i == 0: in_channels = self.in_channels else: in_channels = self.feat_channels # use ``inplace=False`` to avoid error: one of the variables # needed for gradient computation has been modified by an # inplace operation. rpn_convs.append( ConvModule( in_channels, self.feat_channels, 3, padding=1, inplace=False)) self.rpn_conv = nn.Sequential(*rpn_convs) else: self.rpn_conv = nn.Conv2d( self.in_channels, self.feat_channels, 3, padding=1) self.rpn_cls = nn.Conv2d(self.feat_channels, self.num_base_priors * self.cls_out_channels, 1) self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_base_priors * 4, 1) def forward_single(self, x): """Forward feature map of a single scale level.""" x = self.rpn_conv(x) x = F.relu(x, inplace=False) rpn_cls_score = self.rpn_cls(x) rpn_bbox_pred = self.rpn_reg(x) return rpn_cls_score, rpn_bbox_pred def loss(self, cls_scores, bbox_preds, gt_bboxes, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ losses = super(RPNHead, self).loss( cls_scores, bbox_preds, gt_bboxes, None, img_metas, gt_bboxes_ignore=gt_bboxes_ignore) return dict( loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox']) def _get_bboxes_single(self, cls_score_list, bbox_pred_list, score_factor_list, mlvl_anchors, img_meta, cfg, rescale=False, with_nms=True, **kwargs): """Transform outputs of a single image into bbox predictions. 
Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_anchors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_anchors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image. RPN head does not need this value. mlvl_anchors (list[Tensor]): Anchors of all scale level each item has shape (num_anchors, 4). img_meta (dict): Image meta info. cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: Tensor: Labeled boxes in shape (n, 5), where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. """ cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) img_shape = img_meta['img_shape'] # bboxes from different level should be independent during NMS, # level_ids are used as labels for batched NMS to separate them level_ids = [] mlvl_scores = [] mlvl_bbox_preds = [] mlvl_valid_anchors = [] nms_pre = cfg.get('nms_pre', -1) for level_idx in range(len(cls_score_list)): rpn_cls_score = cls_score_list[level_idx] rpn_bbox_pred = bbox_pred_list[level_idx] assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] rpn_cls_score = rpn_cls_score.permute(1, 2, 0) if self.use_sigmoid_cls: rpn_cls_score = rpn_cls_score.reshape(-1) scores = rpn_cls_score.sigmoid() else: rpn_cls_score = rpn_cls_score.reshape(-1, 2) # We set FG labels to [0, num_class-1] and BG label to # num_class in RPN head since mmdet v2.5, which is unified to # be consistent with other head since mmdet v2.0. In mmdet v2.0 # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head. scores = rpn_cls_score.softmax(dim=1)[:, 0] rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4) anchors = mlvl_anchors[level_idx] if 0 < nms_pre < scores.shape[0]: # sort is faster than topk # _, topk_inds = scores.topk(cfg.nms_pre) ranked_scores, rank_inds = scores.sort(descending=True) topk_inds = rank_inds[:nms_pre] scores = ranked_scores[:nms_pre] rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] anchors = anchors[topk_inds, :] mlvl_scores.append(scores) mlvl_bbox_preds.append(rpn_bbox_pred) mlvl_valid_anchors.append(anchors) level_ids.append( scores.new_full((scores.size(0), ), level_idx, dtype=torch.long)) return self._bbox_post_process(mlvl_scores, mlvl_bbox_preds, mlvl_valid_anchors, level_ids, cfg, img_shape) def _bbox_post_process(self, mlvl_scores, mlvl_bboxes, mlvl_valid_anchors, level_ids, cfg, img_shape, **kwargs): """bbox post-processing method. Do the nms operation for bboxes in same level. Args: mlvl_scores (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_bboxes, ). mlvl_bboxes (list[Tensor]): Decoded bboxes from all scale levels of a single image, each item has shape (num_bboxes, 4). mlvl_valid_anchors (list[Tensor]): Anchors of all scale level each item has shape (num_bboxes, 4). level_ids (list[Tensor]): Indexes from all scale levels of a single image, each item has shape (num_bboxes, ). cfg (mmcv.Config): Test / postprocessing configuration, if None, `self.test_cfg` would be used. img_shape (tuple(int)): The shape of model's input image. 
Returns: Tensor: Labeled boxes in shape (n, 5), where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. """ scores = torch.cat(mlvl_scores) anchors = torch.cat(mlvl_valid_anchors) rpn_bbox_pred = torch.cat(mlvl_bboxes) proposals = self.bbox_coder.decode( anchors, rpn_bbox_pred, max_shape=img_shape) ids = torch.cat(level_ids) if cfg.min_bbox_size >= 0: w = proposals[:, 2] - proposals[:, 0] h = proposals[:, 3] - proposals[:, 1] valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size) if not valid_mask.all(): proposals = proposals[valid_mask] scores = scores[valid_mask] ids = ids[valid_mask] if proposals.numel() > 0: dets, _ = batched_nms(proposals, scores, ids, cfg.nms) else: return proposals.new_zeros(0, 5) return dets[:cfg.max_per_img] def onnx_export(self, x, img_metas): """Test without augmentation. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): Meta info of each image. Returns: Tensor: dets of shape [N, num_det, 5]. """ cls_scores, bbox_preds = self(x) assert len(cls_scores) == len(bbox_preds) batch_bboxes, batch_scores = super(RPNHead, self).onnx_export( cls_scores, bbox_preds, img_metas=img_metas, with_nms=False) # Use ONNX::NonMaxSuppression in deployment from mmdet.core.export import add_dummy_nms_for_onnx cfg = copy.deepcopy(self.test_cfg) score_threshold = cfg.nms.get('score_thr', 0.0) nms_pre = cfg.get('deploy_nms_pre', -1) # Different from the normal forward doing NMS level by level, # we do NMS across all levels when exporting ONNX. dets, _ = add_dummy_nms_for_onnx(batch_bboxes, batch_scores, cfg.max_per_img, cfg.nms.iou_threshold, score_threshold, nms_pre, cfg.max_per_img) return dets ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/sabl_retina_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings import numpy as np import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import force_fp32 from mmdet.core import (build_assigner, build_bbox_coder, build_prior_generator, build_sampler, images_to_levels, multi_apply, unmap) from mmdet.core.utils import filter_scores_and_topk from ..builder import HEADS, build_loss from .base_dense_head import BaseDenseHead from .dense_test_mixins import BBoxTestMixin from .guided_anchor_head import GuidedAnchorHead @HEADS.register_module() class SABLRetinaHead(BaseDenseHead, BBoxTestMixin): """Side-Aware Boundary Localization (SABL) for RetinaNet. The anchor generation, assigning and sampling in SABLRetinaHead are the same as GuidedAnchorHead for guided anchoring. Please refer to https://arxiv.org/abs/1912.04260 for more details. Args: num_classes (int): Number of classes. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of Convs for classification \ and regression branches. Defaults to 4. feat_channels (int): Number of hidden channels. \ Defaults to 256. approx_anchor_generator (dict): Config dict for approx generator. square_anchor_generator (dict): Config dict for square generator. conv_cfg (dict): Config dict for ConvModule. Defaults to None. norm_cfg (dict): Config dict for Norm Layer. Defaults to None. bbox_coder (dict): Config dict for bbox coder. 
reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Default False. It should be `True` when using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. train_cfg (dict): Training config of SABLRetinaHead. test_cfg (dict): Testing config of SABLRetinaHead. loss_cls (dict): Config of classification loss. loss_bbox_cls (dict): Config of classification loss for bbox branch. loss_bbox_reg (dict): Config of regression loss for bbox branch. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, num_classes, in_channels, stacked_convs=4, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), conv_cfg=None, norm_cfg=None, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), reg_decoded_bbox=False, train_cfg=None, test_cfg=None, loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), loss_bbox_reg=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5), init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='retina_cls', std=0.01, bias_prob=0.01))): super(SABLRetinaHead, self).__init__(init_cfg) self.in_channels = in_channels self.num_classes = num_classes self.feat_channels = feat_channels self.num_buckets = bbox_coder['num_buckets'] self.side_num = int(np.ceil(self.num_buckets / 2)) assert (approx_anchor_generator['octave_base_scale'] == square_anchor_generator['scales'][0]) assert (approx_anchor_generator['strides'] == square_anchor_generator['strides']) self.approx_anchor_generator = build_prior_generator( approx_anchor_generator) self.square_anchor_generator = build_prior_generator( square_anchor_generator) self.approxs_per_octave = ( self.approx_anchor_generator.num_base_priors[0]) # one anchor per location self.num_base_priors = self.square_anchor_generator.num_base_priors[0] self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.reg_decoded_bbox = reg_decoded_bbox self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) self.sampling = loss_cls['type'] not in [ 'FocalLoss', 'GHMC', 'QualityFocalLoss' ] if self.use_sigmoid_cls: self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 self.bbox_coder = build_bbox_coder(bbox_coder) self.loss_cls = build_loss(loss_cls) self.loss_bbox_cls = build_loss(loss_bbox_cls) self.loss_bbox_reg = build_loss(loss_bbox_reg) self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # use PseudoSampler when sampling is False if self.sampling and hasattr(self.train_cfg, 'sampler'): sampler_cfg = self.train_cfg.sampler else: sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.fp16_enabled = False self._init_layers() @property def num_anchors(self): warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' 'please use "num_base_priors" instead') return self.square_anchor_generator.num_base_priors[0] def _init_layers(self): self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() 
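# ---------------------------------------------------------------------
# Editor's note (illustrative, not part of the original file): each
# side-aware bucketing branch created in this method predicts `side_num`
# values per box side. With the default BucketingBBoxCoder above
# (num_buckets=14), side_num = ceil(14 / 2) = 7, so `retina_bbox_cls`
# and `retina_bbox_reg` below each output side_num * 4 = 28 channels
# per anchor location. A quick check of that arithmetic:
import numpy as np

num_buckets = 14                                  # bbox_coder default
side_num = int(np.ceil(num_buckets / 2))          # buckets per box side
assert side_num * 4 == 28                         # channels of each branch
# ---------------------------------------------------------------------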
self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.retina_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.retina_bbox_reg = nn.Conv2d( self.feat_channels, self.side_num * 4, 3, padding=1) self.retina_bbox_cls = nn.Conv2d( self.feat_channels, self.side_num * 4, 3, padding=1) def forward_single(self, x): cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.retina_cls(cls_feat) bbox_cls_pred = self.retina_bbox_cls(reg_feat) bbox_reg_pred = self.retina_bbox_reg(reg_feat) bbox_pred = (bbox_cls_pred, bbox_reg_pred) return cls_score, bbox_pred def forward(self, feats): return multi_apply(self.forward_single, feats) def get_anchors(self, featmap_sizes, img_metas, device='cuda'): """Get squares according to feature map sizes and guided anchors. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. img_metas (list[dict]): Image meta info. device (torch.device | str): device for returned tensors Returns: tuple: square approxs of each image """ num_imgs = len(img_metas) # since feature map sizes of all images are the same, we only compute # squares for one time multi_level_squares = self.square_anchor_generator.grid_priors( featmap_sizes, device=device) squares_list = [multi_level_squares for _ in range(num_imgs)] return squares_list def get_target(self, approx_list, inside_flag_list, square_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=None, sampling=True, unmap_outputs=True): """Compute bucketing targets. Args: approx_list (list[list]): Multi level approxs of each image. inside_flag_list (list[list]): Multi level inside flags of each image. square_list (list[list]): Multi level squares of each image. gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes. gt_labels_list (list[Tensor]): Gt labels of each image. label_channels (int): Channel of label. sampling (bool): Sample Anchors or not. unmap_outputs (bool): unmap outputs or not. Returns: tuple: Returns a tuple containing learning targets. - labels_list (list[Tensor]): Labels of each level. - label_weights_list (list[Tensor]): Label weights of each \ level. - bbox_cls_targets_list (list[Tensor]): BBox cls targets of \ each level. - bbox_cls_weights_list (list[Tensor]): BBox cls weights of \ each level. - bbox_reg_targets_list (list[Tensor]): BBox reg targets of \ each level. - bbox_reg_weights_list (list[Tensor]): BBox reg weights of \ each level. - num_total_pos (int): Number of positive samples in all \ images. - num_total_neg (int): Number of negative samples in all \ images.
""" num_imgs = len(img_metas) assert len(approx_list) == len(inside_flag_list) == len( square_list) == num_imgs # anchor number of multi levels num_level_squares = [squares.size(0) for squares in square_list[0]] # concat all level anchors and flags to a single tensor inside_flag_flat_list = [] approx_flat_list = [] square_flat_list = [] for i in range(num_imgs): assert len(square_list[i]) == len(inside_flag_list[i]) inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) approx_flat_list.append(torch.cat(approx_list[i])) square_flat_list.append(torch.cat(square_list[i])) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_labels, all_label_weights, all_bbox_cls_targets, all_bbox_cls_weights, all_bbox_reg_targets, all_bbox_reg_weights, pos_inds_list, neg_inds_list) = multi_apply( self._get_target_single, approx_flat_list, inside_flag_flat_list, square_flat_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, sampling=sampling, unmap_outputs=unmap_outputs) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. multiple levels labels_list = images_to_levels(all_labels, num_level_squares) label_weights_list = images_to_levels(all_label_weights, num_level_squares) bbox_cls_targets_list = images_to_levels(all_bbox_cls_targets, num_level_squares) bbox_cls_weights_list = images_to_levels(all_bbox_cls_weights, num_level_squares) bbox_reg_targets_list = images_to_levels(all_bbox_reg_targets, num_level_squares) bbox_reg_weights_list = images_to_levels(all_bbox_reg_weights, num_level_squares) return (labels_list, label_weights_list, bbox_cls_targets_list, bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, num_total_pos, num_total_neg) def _get_target_single(self, flat_approxs, inside_flags, flat_squares, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=None, sampling=True, unmap_outputs=True): """Compute regression and classification targets for anchors in a single image. Args: flat_approxs (Tensor): flat approxs of a single image, shape (n, 4) inside_flags (Tensor): inside flags of a single image, shape (n, ). flat_squares (Tensor): flat squares of a single image, shape (approxs_per_octave * n, 4) gt_bboxes (Tensor): Ground truth bboxes of a single image, \ shape (num_gts, 4). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). img_meta (dict): Meta info of the image. label_channels (int): Channel of label. sampling (bool): Sample Anchors or not. unmap_outputs (bool): unmap outputs or not. 
Returns: tuple: - labels_list (Tensor): Labels in a single image - label_weights (Tensor): Label weights in a single image - bbox_cls_targets (Tensor): BBox cls targets in a single image - bbox_cls_weights (Tensor): BBox cls weights in a single image - bbox_reg_targets (Tensor): BBox reg targets in a single image - bbox_reg_weights (Tensor): BBox reg weights in a single image - num_total_pos (int): Number of positive samples \ in a single image - num_total_neg (int): Number of negative samples \ in a single image """ if not inside_flags.any(): return (None, ) * 8 # assign gt and sample anchors expand_inside_flags = inside_flags[:, None].expand( -1, self.approxs_per_octave).reshape(-1) approxs = flat_approxs[expand_inside_flags, :] squares = flat_squares[inside_flags, :] assign_result = self.assigner.assign(approxs, squares, self.approxs_per_octave, gt_bboxes, gt_bboxes_ignore) sampling_result = self.sampler.sample(assign_result, squares, gt_bboxes) num_valid_squares = squares.shape[0] bbox_cls_targets = squares.new_zeros( (num_valid_squares, self.side_num * 4)) bbox_cls_weights = squares.new_zeros( (num_valid_squares, self.side_num * 4)) bbox_reg_targets = squares.new_zeros( (num_valid_squares, self.side_num * 4)) bbox_reg_weights = squares.new_zeros( (num_valid_squares, self.side_num * 4)) labels = squares.new_full((num_valid_squares, ), self.num_classes, dtype=torch.long) label_weights = squares.new_zeros(num_valid_squares, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: (pos_bbox_reg_targets, pos_bbox_reg_weights, pos_bbox_cls_targets, pos_bbox_cls_weights) = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) bbox_cls_targets[pos_inds, :] = pos_bbox_cls_targets bbox_reg_targets[pos_inds, :] = pos_bbox_reg_targets bbox_cls_weights[pos_inds, :] = pos_bbox_cls_weights bbox_reg_weights[pos_inds, :] = pos_bbox_reg_weights if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_squares.size(0) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_cls_targets = unmap(bbox_cls_targets, num_total_anchors, inside_flags) bbox_cls_weights = unmap(bbox_cls_weights, num_total_anchors, inside_flags) bbox_reg_targets = unmap(bbox_reg_targets, num_total_anchors, inside_flags) bbox_reg_weights = unmap(bbox_reg_weights, num_total_anchors, inside_flags) return (labels, label_weights, bbox_cls_targets, bbox_cls_weights, bbox_reg_targets, bbox_reg_weights, pos_inds, neg_inds) def loss_single(self, cls_score, bbox_pred, labels, label_weights, bbox_cls_targets, bbox_cls_weights, bbox_reg_targets, bbox_reg_weights, num_total_samples): # classification loss labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=num_total_samples) # regression loss bbox_cls_targets = bbox_cls_targets.reshape(-1, self.side_num * 4) bbox_cls_weights = bbox_cls_weights.reshape(-1, self.side_num * 4) 
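# ---------------------------------------------------------------------
# Editor's note (illustrative, not part of the original file): `unmap`,
# used by `_get_target_single` above, scatters targets computed on the
# anchors inside the image back to the full anchor set, padding the
# remainder with `fill`. A minimal sketch of that helper (the `_sketch`
# name is hypothetical), assuming 1-D labels or 2-D per-anchor targets:
import torch

def unmap_sketch(data, count, inds, fill=0):
    if data.dim() == 1:
        ret = data.new_full((count, ), fill)
        ret[inds.type(torch.bool)] = data         # place valid entries
    else:
        new_size = (count, ) + data.size()[1:]
        ret = data.new_full(new_size, fill)
        ret[inds.type(torch.bool), :] = data
    return ret
# ---------------------------------------------------------------------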
bbox_reg_targets = bbox_reg_targets.reshape(-1, self.side_num * 4) bbox_reg_weights = bbox_reg_weights.reshape(-1, self.side_num * 4) (bbox_cls_pred, bbox_reg_pred) = bbox_pred bbox_cls_pred = bbox_cls_pred.permute(0, 2, 3, 1).reshape( -1, self.side_num * 4) bbox_reg_pred = bbox_reg_pred.permute(0, 2, 3, 1).reshape( -1, self.side_num * 4) loss_bbox_cls = self.loss_bbox_cls( bbox_cls_pred, bbox_cls_targets.long(), bbox_cls_weights, avg_factor=num_total_samples * 4 * self.side_num) loss_bbox_reg = self.loss_bbox_reg( bbox_reg_pred, bbox_reg_targets, bbox_reg_weights, avg_factor=num_total_samples * 4 * self.bbox_coder.offset_topk) return loss_cls, loss_bbox_cls, loss_bbox_reg @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.approx_anchor_generator.num_levels device = cls_scores[0].device # get sampled approxes approxs_list, inside_flag_list = GuidedAnchorHead.get_sampled_approxs( self, featmap_sizes, img_metas, device=device) square_list = self.get_anchors(featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_target( approxs_list, inside_flag_list, square_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, sampling=self.sampling) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_cls_targets_list, bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = ( num_total_pos + num_total_neg if self.sampling else num_total_pos) losses_cls, losses_bbox_cls, losses_bbox_reg = multi_apply( self.loss_single, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_cls_targets_list, bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, num_total_samples=num_total_samples) return dict( loss_cls=losses_cls, loss_bbox_cls=losses_bbox_cls, loss_bbox_reg=losses_bbox_reg) @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def get_bboxes(self, cls_scores, bbox_preds, img_metas, cfg=None, rescale=False): assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] device = cls_scores[0].device mlvl_anchors = self.get_anchors( featmap_sizes, img_metas, device=device) result_list = [] for img_id in range(len(img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_cls_pred_list = [ bbox_preds[i][0][img_id].detach() for i in range(num_levels) ] bbox_reg_pred_list = [ bbox_preds[i][1][img_id].detach() for i in range(num_levels) ] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] proposals = self._get_bboxes_single( cls_score_list, bbox_cls_pred_list, bbox_reg_pred_list, mlvl_anchors[img_id], img_shape, scale_factor, cfg, rescale) result_list.append(proposals) return result_list def _get_bboxes_single(self, cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors, img_shape, scale_factor, cfg, rescale=False): cfg = self.test_cfg if cfg is None else cfg nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_confids = [] mlvl_labels = [] assert len(cls_scores) == len(bbox_cls_preds) == len( bbox_reg_preds) == len(mlvl_anchors) for cls_score, bbox_cls_pred, bbox_reg_pred, 
anchors in zip( cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors): assert cls_score.size()[-2:] == bbox_cls_pred.size()[-2:] == bbox_reg_pred.size()[-2:] cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1)[:, :-1] bbox_cls_pred = bbox_cls_pred.permute(1, 2, 0).reshape( -1, self.side_num * 4) bbox_reg_pred = bbox_reg_pred.permute(1, 2, 0).reshape( -1, self.side_num * 4) # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict( anchors=anchors, bbox_cls_pred=bbox_cls_pred, bbox_reg_pred=bbox_reg_pred)) scores, labels, _, filtered_results = results anchors = filtered_results['anchors'] bbox_cls_pred = filtered_results['bbox_cls_pred'] bbox_reg_pred = filtered_results['bbox_reg_pred'] bbox_preds = [ bbox_cls_pred.contiguous(), bbox_reg_pred.contiguous() ] bboxes, confids = self.bbox_coder.decode( anchors.contiguous(), bbox_preds, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_confids.append(confids) mlvl_labels.append(labels) return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes, scale_factor, cfg, rescale, True, mlvl_confids) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/solo_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import mmcv import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmdet.core import InstanceData, mask_matrix_nms, multi_apply from mmdet.core.utils import center_of_mass, generate_coordinate from mmdet.models.builder import HEADS, build_loss from mmdet.utils.misc import floordiv from .base_mask_head import BaseMaskHead @HEADS.register_module() class SOLOHead(BaseMaskHead): """SOLO mask head used in `SOLO: Segmenting Objects by Locations. <https://arxiv.org/abs/1912.04488>`_ Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. Used in child classes. Default: 256. stacked_convs (int): Number of stacking convs of the head. Default: 4. strides (tuple): Downsample factor of each feature map. scale_ranges (tuple[tuple[int, int]]): Area range of multiple level masks, in the format [(min1, max1), (min2, max2), ...]. A range of (16, 64) means that instances whose scale (the square root of the bbox area) falls between 16 and 64 are assigned to this level. pos_scale (float): Constant scale factor to control the center region. num_grids (list[int]): Divide the image into uniform grids; each feature map has a different grid value. The number of output channels is grid ** 2. Default: [40, 36, 24, 16, 12]. cls_down_index (int): The index of downsample operation in classification branch. Default: 0. loss_mask (dict): Config of mask loss. loss_cls (dict): Config of classification loss. norm_cfg (dict): dictionary to construct and config norm layer. Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). train_cfg (dict): Training config of head. test_cfg (dict): Testing config of head. init_cfg (dict or list[dict], optional): Initialization config dict.
""" def __init__( self, num_classes, in_channels, feat_channels=256, stacked_convs=4, strides=(4, 8, 16, 32, 64), scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128, 512)), pos_scale=0.2, num_grids=[40, 36, 24, 16, 12], cls_down_index=0, loss_mask=None, loss_cls=None, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), train_cfg=None, test_cfg=None, init_cfg=[ dict(type='Normal', layer='Conv2d', std=0.01), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_cls')) ], ): super(SOLOHead, self).__init__(init_cfg) self.num_classes = num_classes self.cls_out_channels = self.num_classes self.in_channels = in_channels self.feat_channels = feat_channels self.stacked_convs = stacked_convs self.strides = strides self.num_grids = num_grids # number of FPN feats self.num_levels = len(strides) assert self.num_levels == len(scale_ranges) == len(num_grids) self.scale_ranges = scale_ranges self.pos_scale = pos_scale self.cls_down_index = cls_down_index self.loss_cls = build_loss(loss_cls) self.loss_mask = build_loss(loss_mask) self.norm_cfg = norm_cfg self.init_cfg = init_cfg self.train_cfg = train_cfg self.test_cfg = test_cfg self._init_layers() def _init_layers(self): self.mask_convs = nn.ModuleList() self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels + 2 if i == 0 else self.feat_channels self.mask_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) self.conv_mask_list = nn.ModuleList() for num_grid in self.num_grids: self.conv_mask_list.append( nn.Conv2d(self.feat_channels, num_grid**2, 1)) self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) def resize_feats(self, feats): """Downsample the first feat and upsample last feat in feats.""" out = [] for i in range(len(feats)): if i == 0: out.append( F.interpolate( feats[0], size=feats[i + 1].shape[-2:], mode='bilinear', align_corners=False)) elif i == len(feats) - 1: out.append( F.interpolate( feats[i], size=feats[i - 1].shape[-2:], mode='bilinear', align_corners=False)) else: out.append(feats[i]) return out def forward(self, feats): assert len(feats) == self.num_levels feats = self.resize_feats(feats) mlvl_mask_preds = [] mlvl_cls_preds = [] for i in range(self.num_levels): x = feats[i] mask_feat = x cls_feat = x # generate and concat the coordinate coord_feat = generate_coordinate(mask_feat.size(), mask_feat.device) mask_feat = torch.cat([mask_feat, coord_feat], 1) for mask_layer in (self.mask_convs): mask_feat = mask_layer(mask_feat) mask_feat = F.interpolate( mask_feat, scale_factor=2, mode='bilinear') mask_pred = self.conv_mask_list[i](mask_feat) # cls branch for j, cls_layer in enumerate(self.cls_convs): if j == self.cls_down_index: num_grid = self.num_grids[i] cls_feat = F.interpolate( cls_feat, size=num_grid, mode='bilinear') cls_feat = cls_layer(cls_feat) cls_pred = self.conv_cls(cls_feat) if not self.training: feat_wh = feats[0].size()[-2:] upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) mask_pred = F.interpolate( mask_pred.sigmoid(), size=upsampled_size, mode='bilinear') cls_pred = cls_pred.sigmoid() # get local maximum local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) keep_mask = local_max[:, :, :-1, :-1] == cls_pred cls_pred = 
cls_pred * keep_mask mlvl_mask_preds.append(mask_pred) mlvl_cls_preds.append(cls_pred) return mlvl_mask_preds, mlvl_cls_preds def loss(self, mlvl_mask_preds, mlvl_cls_preds, gt_labels, gt_masks, img_metas, gt_bboxes=None, **kwargs): """Calculate the loss of total batch. Args: mlvl_mask_preds (list[Tensor]): Multi-level mask prediction. Each element in the list has shape (batch_size, num_grids**2 ,h ,w). mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids ,num_grids). gt_labels (list[Tensor]): Labels of multiple images. gt_masks (list[Tensor]): Ground truth masks of multiple images. Each has shape (num_instances, h, w). img_metas (list[dict]): Meta information of multiple images. gt_bboxes (list[Tensor]): Ground truth bboxes of multiple images. Default: None. Returns: dict[str, Tensor]: A dictionary of loss components. """ num_levels = self.num_levels num_imgs = len(gt_labels) featmap_sizes = [featmap.size()[-2:] for featmap in mlvl_mask_preds] # `BoolTensor` in `pos_masks` represent # whether the corresponding point is # positive pos_mask_targets, labels, pos_masks = multi_apply( self._get_targets_single, gt_bboxes, gt_labels, gt_masks, featmap_sizes=featmap_sizes) # change from the outside list meaning multi images # to the outside list meaning multi levels mlvl_pos_mask_targets = [[] for _ in range(num_levels)] mlvl_pos_mask_preds = [[] for _ in range(num_levels)] mlvl_pos_masks = [[] for _ in range(num_levels)] mlvl_labels = [[] for _ in range(num_levels)] for img_id in range(num_imgs): assert num_levels == len(pos_mask_targets[img_id]) for lvl in range(num_levels): mlvl_pos_mask_targets[lvl].append( pos_mask_targets[img_id][lvl]) mlvl_pos_mask_preds[lvl].append( mlvl_mask_preds[lvl][img_id, pos_masks[img_id][lvl], ...]) mlvl_pos_masks[lvl].append(pos_masks[img_id][lvl].flatten()) mlvl_labels[lvl].append(labels[img_id][lvl].flatten()) # cat multiple image temp_mlvl_cls_preds = [] for lvl in range(num_levels): mlvl_pos_mask_targets[lvl] = torch.cat( mlvl_pos_mask_targets[lvl], dim=0) mlvl_pos_mask_preds[lvl] = torch.cat( mlvl_pos_mask_preds[lvl], dim=0) mlvl_pos_masks[lvl] = torch.cat(mlvl_pos_masks[lvl], dim=0) mlvl_labels[lvl] = torch.cat(mlvl_labels[lvl], dim=0) temp_mlvl_cls_preds.append(mlvl_cls_preds[lvl].permute( 0, 2, 3, 1).reshape(-1, self.cls_out_channels)) num_pos = sum(item.sum() for item in mlvl_pos_masks) # dice loss loss_mask = [] for pred, target in zip(mlvl_pos_mask_preds, mlvl_pos_mask_targets): if pred.size()[0] == 0: loss_mask.append(pred.sum().unsqueeze(0)) continue loss_mask.append( self.loss_mask(pred, target, reduction_override='none')) if num_pos > 0: loss_mask = torch.cat(loss_mask).sum() / num_pos else: loss_mask = torch.cat(loss_mask).mean() flatten_labels = torch.cat(mlvl_labels) flatten_cls_preds = torch.cat(temp_mlvl_cls_preds) loss_cls = self.loss_cls( flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1) return dict(loss_mask=loss_mask, loss_cls=loss_cls) def _get_targets_single(self, gt_bboxes, gt_labels, gt_masks, featmap_sizes=None): """Compute targets for predictions of single image. Args: gt_bboxes (Tensor): Ground truth bbox of each instance, shape (num_gts, 4). gt_labels (Tensor): Ground truth label of each instance, shape (num_gts,). gt_masks (Tensor): Ground truth mask of each instance, shape (num_gts, h, w). featmap_sizes (list[:obj:`torch.size`]): Size of each feature map from feature pyramid, each element means (feat_h, feat_w). Default: None. 
Returns: Tuple: Usually returns a tuple containing targets for predictions. - mlvl_pos_mask_targets (list[Tensor]): Each element represent the binary mask targets for positive points in this level, has shape (num_pos, out_h, out_w). - mlvl_labels (list[Tensor]): Each element is classification labels for all points in this level, has shape (num_grid, num_grid). - mlvl_pos_masks (list[Tensor]): Each element is a `BoolTensor` to represent whether the corresponding point in single level is positive, has shape (num_grid **2). """ device = gt_labels.device gt_areas = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (gt_bboxes[:, 3] - gt_bboxes[:, 1])) mlvl_pos_mask_targets = [] mlvl_labels = [] mlvl_pos_masks = [] for (lower_bound, upper_bound), stride, featmap_size, num_grid \ in zip(self.scale_ranges, self.strides, featmap_sizes, self.num_grids): mask_target = torch.zeros( [num_grid**2, featmap_size[0], featmap_size[1]], dtype=torch.uint8, device=device) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes labels = torch.zeros([num_grid, num_grid], dtype=torch.int64, device=device) + self.num_classes pos_mask = torch.zeros([num_grid**2], dtype=torch.bool, device=device) gt_inds = ((gt_areas >= lower_bound) & (gt_areas <= upper_bound)).nonzero().flatten() if len(gt_inds) == 0: mlvl_pos_mask_targets.append( mask_target.new_zeros(0, featmap_size[0], featmap_size[1])) mlvl_labels.append(labels) mlvl_pos_masks.append(pos_mask) continue hit_gt_bboxes = gt_bboxes[gt_inds] hit_gt_labels = gt_labels[gt_inds] hit_gt_masks = gt_masks[gt_inds, ...] pos_w_ranges = 0.5 * (hit_gt_bboxes[:, 2] - hit_gt_bboxes[:, 0]) * self.pos_scale pos_h_ranges = 0.5 * (hit_gt_bboxes[:, 3] - hit_gt_bboxes[:, 1]) * self.pos_scale # Make sure hit_gt_masks has a value valid_mask_flags = hit_gt_masks.sum(dim=-1).sum(dim=-1) > 0 output_stride = stride / 2 for gt_mask, gt_label, pos_h_range, pos_w_range, \ valid_mask_flag in \ zip(hit_gt_masks, hit_gt_labels, pos_h_ranges, pos_w_ranges, valid_mask_flags): if not valid_mask_flag: continue upsampled_size = (featmap_sizes[0][0] * 4, featmap_sizes[0][1] * 4) center_h, center_w = center_of_mass(gt_mask) coord_w = int( floordiv((center_w / upsampled_size[1]), (1. / num_grid), rounding_mode='trunc')) coord_h = int( floordiv((center_h / upsampled_size[0]), (1. / num_grid), rounding_mode='trunc')) # left, top, right, down top_box = max( 0, int( floordiv( (center_h - pos_h_range) / upsampled_size[0], (1. / num_grid), rounding_mode='trunc'))) down_box = min( num_grid - 1, int( floordiv( (center_h + pos_h_range) / upsampled_size[0], (1. / num_grid), rounding_mode='trunc'))) left_box = max( 0, int( floordiv( (center_w - pos_w_range) / upsampled_size[1], (1. / num_grid), rounding_mode='trunc'))) right_box = min( num_grid - 1, int( floordiv( (center_w + pos_w_range) / upsampled_size[1], (1. / num_grid), rounding_mode='trunc'))) top = max(top_box, coord_h - 1) down = min(down_box, coord_h + 1) left = max(coord_w - 1, left_box) right = min(right_box, coord_w + 1) labels[top:(down + 1), left:(right + 1)] = gt_label # ins gt_mask = np.uint8(gt_mask.cpu().numpy()) # Follow the original implementation, F.interpolate is # different from cv2 and opencv gt_mask = mmcv.imrescale(gt_mask, scale=1. / output_stride) gt_mask = torch.from_numpy(gt_mask).to(device=device) for i in range(top, down + 1): for j in range(left, right + 1): index = int(i * num_grid + j) mask_target[index, :gt_mask.shape[0], :gt_mask. 
shape[1]] = gt_mask
                        pos_mask[index] = True
            mlvl_pos_mask_targets.append(mask_target[pos_mask])
            mlvl_labels.append(labels)
            mlvl_pos_masks.append(pos_mask)
        return mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks

    def get_results(self, mlvl_mask_preds, mlvl_cls_scores, img_metas,
                    **kwargs):
        """Get multi-image mask results.

        Args:
            mlvl_mask_preds (list[Tensor]): Multi-level mask prediction.
                Each element in the list has shape
                (batch_size, num_grids**2, h, w).
            mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element
                in the list has shape
                (batch_size, num_classes, num_grids, num_grids).
            img_metas (list[dict]): Meta information of all images.

        Returns:
            list[:obj:`InstanceData`]: Processed results of multiple
            images. Each :obj:`InstanceData` usually contains the
            following keys.

                - scores (Tensor): Classification scores, has shape
                  (num_instances,).
                - labels (Tensor): Has shape (num_instances,).
                - masks (Tensor): Processed mask results, has shape
                  (num_instances, h, w).
        """
        mlvl_cls_scores = [
            item.permute(0, 2, 3, 1) for item in mlvl_cls_scores
        ]
        assert len(mlvl_mask_preds) == len(mlvl_cls_scores)
        num_levels = len(mlvl_cls_scores)

        results_list = []
        for img_id in range(len(img_metas)):
            cls_pred_list = [
                mlvl_cls_scores[lvl][img_id].view(-1, self.cls_out_channels)
                for lvl in range(num_levels)
            ]
            mask_pred_list = [
                mlvl_mask_preds[lvl][img_id] for lvl in range(num_levels)
            ]
            cls_pred_list = torch.cat(cls_pred_list, dim=0)
            mask_pred_list = torch.cat(mask_pred_list, dim=0)
            results = self._get_results_single(
                cls_pred_list, mask_pred_list, img_meta=img_metas[img_id])
            results_list.append(results)

        return results_list

    def _get_results_single(self, cls_scores, mask_preds, img_meta,
                            cfg=None):
        """Get processed mask related results of single image.

        Args:
            cls_scores (Tensor): Classification score of all points
                in single image, has shape (num_points, num_classes).
            mask_preds (Tensor): Mask prediction of all points in
                single image, has shape (num_points, feat_h, feat_w).
            img_meta (dict): Meta information of corresponding image.
            cfg (dict, optional): Config used in test phase.
                Default: None.

        Returns:
            :obj:`InstanceData`: Processed results of single image.
            It usually contains the following keys.

                - scores (Tensor): Classification scores, has shape
                  (num_instances,).
                - labels (Tensor): Has shape (num_instances,).
                - masks (Tensor): Processed mask results, has shape
                  (num_instances, h, w).
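
        Example:
            A minimal, illustrative sketch (not from this file) of the
            maskness rescoring performed below; the 0.5 threshold is an
            assumed stand-in for ``cfg.mask_thr``:

            >>> import torch
            >>> mask_preds = torch.rand(3, 8, 8)  # soft masks in [0, 1]
            >>> masks = mask_preds > 0.5          # binarize with mask_thr
            >>> sum_masks = masks.sum((1, 2)).float().clamp(min=1)
            >>> maskness = (mask_preds * masks).sum((1, 2)) / sum_masks
            >>> # classification scores are rescaled by mask quality
            >>> cls_scores = torch.rand(3) * maskness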
""" def empty_results(results, cls_scores): """Generate a empty results.""" results.scores = cls_scores.new_ones(0) results.masks = cls_scores.new_zeros(0, *results.ori_shape[:2]) results.labels = cls_scores.new_ones(0) return results cfg = self.test_cfg if cfg is None else cfg assert len(cls_scores) == len(mask_preds) results = InstanceData(img_meta) featmap_size = mask_preds.size()[-2:] img_shape = results.img_shape ori_shape = results.ori_shape h, w, _ = img_shape upsampled_size = (featmap_size[0] * 4, featmap_size[1] * 4) score_mask = (cls_scores > cfg.score_thr) cls_scores = cls_scores[score_mask] if len(cls_scores) == 0: return empty_results(results, cls_scores) inds = score_mask.nonzero() cls_labels = inds[:, 1] # Filter the mask mask with an area is smaller than # stride of corresponding feature level lvl_interval = cls_labels.new_tensor(self.num_grids).pow(2).cumsum(0) strides = cls_scores.new_ones(lvl_interval[-1]) strides[:lvl_interval[0]] *= self.strides[0] for lvl in range(1, self.num_levels): strides[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= self.strides[lvl] strides = strides[inds[:, 0]] mask_preds = mask_preds[inds[:, 0]] masks = mask_preds > cfg.mask_thr sum_masks = masks.sum((1, 2)).float() keep = sum_masks > strides if keep.sum() == 0: return empty_results(results, cls_scores) masks = masks[keep] mask_preds = mask_preds[keep] sum_masks = sum_masks[keep] cls_scores = cls_scores[keep] cls_labels = cls_labels[keep] # maskness. mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks cls_scores *= mask_scores scores, labels, _, keep_inds = mask_matrix_nms( masks, cls_labels, cls_scores, mask_area=sum_masks, nms_pre=cfg.nms_pre, max_num=cfg.max_per_img, kernel=cfg.kernel, sigma=cfg.sigma, filter_thr=cfg.filter_thr) mask_preds = mask_preds[keep_inds] mask_preds = F.interpolate( mask_preds.unsqueeze(0), size=upsampled_size, mode='bilinear')[:, :, :h, :w] mask_preds = F.interpolate( mask_preds, size=ori_shape[:2], mode='bilinear').squeeze(0) masks = mask_preds > cfg.mask_thr results.masks = masks results.labels = labels results.scores = scores return results @HEADS.register_module() class DecoupledSOLOHead(SOLOHead): """Decoupled SOLO mask head used in `SOLO: Segmenting Objects by Locations. `_ Args: init_cfg (dict or list[dict], optional): Initialization config dict. 
""" def __init__(self, *args, init_cfg=[ dict(type='Normal', layer='Conv2d', std=0.01), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list_x')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list_y')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_cls')) ], **kwargs): super(DecoupledSOLOHead, self).__init__( *args, init_cfg=init_cfg, **kwargs) def _init_layers(self): self.mask_convs_x = nn.ModuleList() self.mask_convs_y = nn.ModuleList() self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels + 1 if i == 0 else self.feat_channels self.mask_convs_x.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) self.mask_convs_y.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) self.conv_mask_list_x = nn.ModuleList() self.conv_mask_list_y = nn.ModuleList() for num_grid in self.num_grids: self.conv_mask_list_x.append( nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) self.conv_mask_list_y.append( nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) def forward(self, feats): assert len(feats) == self.num_levels feats = self.resize_feats(feats) mask_preds_x = [] mask_preds_y = [] cls_preds = [] for i in range(self.num_levels): x = feats[i] mask_feat = x cls_feat = x # generate and concat the coordinate coord_feat = generate_coordinate(mask_feat.size(), mask_feat.device) mask_feat_x = torch.cat([mask_feat, coord_feat[:, 0:1, ...]], 1) mask_feat_y = torch.cat([mask_feat, coord_feat[:, 1:2, ...]], 1) for mask_layer_x, mask_layer_y in \ zip(self.mask_convs_x, self.mask_convs_y): mask_feat_x = mask_layer_x(mask_feat_x) mask_feat_y = mask_layer_y(mask_feat_y) mask_feat_x = F.interpolate( mask_feat_x, scale_factor=2, mode='bilinear') mask_feat_y = F.interpolate( mask_feat_y, scale_factor=2, mode='bilinear') mask_pred_x = self.conv_mask_list_x[i](mask_feat_x) mask_pred_y = self.conv_mask_list_y[i](mask_feat_y) # cls branch for j, cls_layer in enumerate(self.cls_convs): if j == self.cls_down_index: num_grid = self.num_grids[i] cls_feat = F.interpolate( cls_feat, size=num_grid, mode='bilinear') cls_feat = cls_layer(cls_feat) cls_pred = self.conv_cls(cls_feat) if not self.training: feat_wh = feats[0].size()[-2:] upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) mask_pred_x = F.interpolate( mask_pred_x.sigmoid(), size=upsampled_size, mode='bilinear') mask_pred_y = F.interpolate( mask_pred_y.sigmoid(), size=upsampled_size, mode='bilinear') cls_pred = cls_pred.sigmoid() # get local maximum local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) keep_mask = local_max[:, :, :-1, :-1] == cls_pred cls_pred = cls_pred * keep_mask mask_preds_x.append(mask_pred_x) mask_preds_y.append(mask_pred_y) cls_preds.append(cls_pred) return mask_preds_x, mask_preds_y, cls_preds def loss(self, mlvl_mask_preds_x, mlvl_mask_preds_y, mlvl_cls_preds, gt_labels, gt_masks, img_metas, gt_bboxes=None, **kwargs): """Calculate the loss of total batch. Args: mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction from x branch. Each element in the list has shape (batch_size, num_grids ,h ,w). mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction from y branch. 
Each element in the list has shape (batch_size, num_grids ,h ,w). mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids ,num_grids). gt_labels (list[Tensor]): Labels of multiple images. gt_masks (list[Tensor]): Ground truth masks of multiple images. Each has shape (num_instances, h, w). img_metas (list[dict]): Meta information of multiple images. gt_bboxes (list[Tensor]): Ground truth bboxes of multiple images. Default: None. Returns: dict[str, Tensor]: A dictionary of loss components. """ num_levels = self.num_levels num_imgs = len(gt_labels) featmap_sizes = [featmap.size()[-2:] for featmap in mlvl_mask_preds_x] pos_mask_targets, labels, \ xy_pos_indexes = \ multi_apply(self._get_targets_single, gt_bboxes, gt_labels, gt_masks, featmap_sizes=featmap_sizes) # change from the outside list meaning multi images # to the outside list meaning multi levels mlvl_pos_mask_targets = [[] for _ in range(num_levels)] mlvl_pos_mask_preds_x = [[] for _ in range(num_levels)] mlvl_pos_mask_preds_y = [[] for _ in range(num_levels)] mlvl_labels = [[] for _ in range(num_levels)] for img_id in range(num_imgs): for lvl in range(num_levels): mlvl_pos_mask_targets[lvl].append( pos_mask_targets[img_id][lvl]) mlvl_pos_mask_preds_x[lvl].append( mlvl_mask_preds_x[lvl][img_id, xy_pos_indexes[img_id][lvl][:, 1]]) mlvl_pos_mask_preds_y[lvl].append( mlvl_mask_preds_y[lvl][img_id, xy_pos_indexes[img_id][lvl][:, 0]]) mlvl_labels[lvl].append(labels[img_id][lvl].flatten()) # cat multiple image temp_mlvl_cls_preds = [] for lvl in range(num_levels): mlvl_pos_mask_targets[lvl] = torch.cat( mlvl_pos_mask_targets[lvl], dim=0) mlvl_pos_mask_preds_x[lvl] = torch.cat( mlvl_pos_mask_preds_x[lvl], dim=0) mlvl_pos_mask_preds_y[lvl] = torch.cat( mlvl_pos_mask_preds_y[lvl], dim=0) mlvl_labels[lvl] = torch.cat(mlvl_labels[lvl], dim=0) temp_mlvl_cls_preds.append(mlvl_cls_preds[lvl].permute( 0, 2, 3, 1).reshape(-1, self.cls_out_channels)) num_pos = 0. # dice loss loss_mask = [] for pred_x, pred_y, target in \ zip(mlvl_pos_mask_preds_x, mlvl_pos_mask_preds_y, mlvl_pos_mask_targets): num_masks = pred_x.size(0) if num_masks == 0: # make sure can get grad loss_mask.append((pred_x.sum() + pred_y.sum()).unsqueeze(0)) continue num_pos += num_masks pred_mask = pred_y.sigmoid() * pred_x.sigmoid() loss_mask.append( self.loss_mask(pred_mask, target, reduction_override='none')) if num_pos > 0: loss_mask = torch.cat(loss_mask).sum() / num_pos else: loss_mask = torch.cat(loss_mask).mean() # cate flatten_labels = torch.cat(mlvl_labels) flatten_cls_preds = torch.cat(temp_mlvl_cls_preds) loss_cls = self.loss_cls( flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1) return dict(loss_mask=loss_mask, loss_cls=loss_cls) def _get_targets_single(self, gt_bboxes, gt_labels, gt_masks, featmap_sizes=None): """Compute targets for predictions of single image. Args: gt_bboxes (Tensor): Ground truth bbox of each instance, shape (num_gts, 4). gt_labels (Tensor): Ground truth label of each instance, shape (num_gts,). gt_masks (Tensor): Ground truth mask of each instance, shape (num_gts, h, w). featmap_sizes (list[:obj:`torch.size`]): Size of each feature map from feature pyramid, each element means (feat_h, feat_w). Default: None. Returns: Tuple: Usually returns a tuple containing targets for predictions. - mlvl_pos_mask_targets (list[Tensor]): Each element represent the binary mask targets for positive points in this level, has shape (num_pos, out_h, out_w). 
- mlvl_labels (list[Tensor]): Each element is classification labels for all points in this level, has shape (num_grid, num_grid). - mlvl_xy_pos_indexes (list[Tensor]): Each element in the list contains the index of positive samples in corresponding level, has shape (num_pos, 2), last dimension 2 present (index_x, index_y). """ mlvl_pos_mask_targets, mlvl_labels, \ mlvl_pos_masks = \ super()._get_targets_single(gt_bboxes, gt_labels, gt_masks, featmap_sizes=featmap_sizes) mlvl_xy_pos_indexes = [(item - self.num_classes).nonzero() for item in mlvl_labels] return mlvl_pos_mask_targets, mlvl_labels, mlvl_xy_pos_indexes def get_results(self, mlvl_mask_preds_x, mlvl_mask_preds_y, mlvl_cls_scores, img_metas, rescale=None, **kwargs): """Get multi-image mask results. Args: mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction from x branch. Each element in the list has shape (batch_size, num_grids ,h ,w). mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction from y branch. Each element in the list has shape (batch_size, num_grids ,h ,w). mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes ,num_grids ,num_grids). img_metas (list[dict]): Meta information of all images. Returns: list[:obj:`InstanceData`]: Processed results of multiple images.Each :obj:`InstanceData` usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). """ mlvl_cls_scores = [ item.permute(0, 2, 3, 1) for item in mlvl_cls_scores ] assert len(mlvl_mask_preds_x) == len(mlvl_cls_scores) num_levels = len(mlvl_cls_scores) results_list = [] for img_id in range(len(img_metas)): cls_pred_list = [ mlvl_cls_scores[i][img_id].view( -1, self.cls_out_channels).detach() for i in range(num_levels) ] mask_pred_list_x = [ mlvl_mask_preds_x[i][img_id] for i in range(num_levels) ] mask_pred_list_y = [ mlvl_mask_preds_y[i][img_id] for i in range(num_levels) ] cls_pred_list = torch.cat(cls_pred_list, dim=0) mask_pred_list_x = torch.cat(mask_pred_list_x, dim=0) mask_pred_list_y = torch.cat(mask_pred_list_y, dim=0) results = self._get_results_single( cls_pred_list, mask_pred_list_x, mask_pred_list_y, img_meta=img_metas[img_id], cfg=self.test_cfg) results_list.append(results) return results_list def _get_results_single(self, cls_scores, mask_preds_x, mask_preds_y, img_meta, cfg): """Get processed mask related results of single image. Args: cls_scores (Tensor): Classification score of all points in single image, has shape (num_points, num_classes). mask_preds_x (Tensor): Mask prediction of x branch of all points in single image, has shape (sum_num_grids, feat_h, feat_w). mask_preds_y (Tensor): Mask prediction of y branch of all points in single image, has shape (sum_num_grids, feat_h, feat_w). img_meta (dict): Meta information of corresponding image. cfg (dict): Config used in test phase. Returns: :obj:`InstanceData`: Processed results of single image. it usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). 
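
        Example:
            A single-level sketch (assumed grid size) of the flat-index
            decomposition this method performs to pick the x/y branch maps:

            >>> import torch
            >>> num_grid = 12
            >>> flat_inds = torch.tensor([0, 13, 143])  # indices in [0, 144)
            >>> y_inds = flat_inds // num_grid          # grid row
            >>> x_inds = flat_inds % num_grid           # grid column
            >>> # final mask ~ mask_preds_x[x_inds] * mask_preds_y[y_inds]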
""" def empty_results(results, cls_scores): """Generate a empty results.""" results.scores = cls_scores.new_ones(0) results.masks = cls_scores.new_zeros(0, *results.ori_shape[:2]) results.labels = cls_scores.new_ones(0) return results cfg = self.test_cfg if cfg is None else cfg results = InstanceData(img_meta) img_shape = results.img_shape ori_shape = results.ori_shape h, w, _ = img_shape featmap_size = mask_preds_x.size()[-2:] upsampled_size = (featmap_size[0] * 4, featmap_size[1] * 4) score_mask = (cls_scores > cfg.score_thr) cls_scores = cls_scores[score_mask] inds = score_mask.nonzero() lvl_interval = inds.new_tensor(self.num_grids).pow(2).cumsum(0) num_all_points = lvl_interval[-1] lvl_start_index = inds.new_ones(num_all_points) num_grids = inds.new_ones(num_all_points) seg_size = inds.new_tensor(self.num_grids).cumsum(0) mask_lvl_start_index = inds.new_ones(num_all_points) strides = inds.new_ones(num_all_points) lvl_start_index[:lvl_interval[0]] *= 0 mask_lvl_start_index[:lvl_interval[0]] *= 0 num_grids[:lvl_interval[0]] *= self.num_grids[0] strides[:lvl_interval[0]] *= self.strides[0] for lvl in range(1, self.num_levels): lvl_start_index[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ lvl_interval[lvl - 1] mask_lvl_start_index[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ seg_size[lvl - 1] num_grids[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ self.num_grids[lvl] strides[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ self.strides[lvl] lvl_start_index = lvl_start_index[inds[:, 0]] mask_lvl_start_index = mask_lvl_start_index[inds[:, 0]] num_grids = num_grids[inds[:, 0]] strides = strides[inds[:, 0]] y_lvl_offset = (inds[:, 0] - lvl_start_index) // num_grids x_lvl_offset = (inds[:, 0] - lvl_start_index) % num_grids y_inds = mask_lvl_start_index + y_lvl_offset x_inds = mask_lvl_start_index + x_lvl_offset cls_labels = inds[:, 1] mask_preds = mask_preds_x[x_inds, ...] * mask_preds_y[y_inds, ...] masks = mask_preds > cfg.mask_thr sum_masks = masks.sum((1, 2)).float() keep = sum_masks > strides if keep.sum() == 0: return empty_results(results, cls_scores) masks = masks[keep] mask_preds = mask_preds[keep] sum_masks = sum_masks[keep] cls_scores = cls_scores[keep] cls_labels = cls_labels[keep] # maskness. mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks cls_scores *= mask_scores scores, labels, _, keep_inds = mask_matrix_nms( masks, cls_labels, cls_scores, mask_area=sum_masks, nms_pre=cfg.nms_pre, max_num=cfg.max_per_img, kernel=cfg.kernel, sigma=cfg.sigma, filter_thr=cfg.filter_thr) mask_preds = mask_preds[keep_inds] mask_preds = F.interpolate( mask_preds.unsqueeze(0), size=upsampled_size, mode='bilinear')[:, :, :h, :w] mask_preds = F.interpolate( mask_preds, size=ori_shape[:2], mode='bilinear').squeeze(0) masks = mask_preds > cfg.mask_thr results.masks = masks results.labels = labels results.scores = scores return results @HEADS.register_module() class DecoupledSOLOLightHead(DecoupledSOLOHead): """Decoupled Light SOLO mask head used in `SOLO: Segmenting Objects by Locations `_ Args: with_dcn (bool): Whether use dcn in mask_convs and cls_convs, default: False. init_cfg (dict or list[dict], optional): Initialization config dict. 
""" def __init__(self, *args, dcn_cfg=None, init_cfg=[ dict(type='Normal', layer='Conv2d', std=0.01), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list_x')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list_y')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_cls')) ], **kwargs): assert dcn_cfg is None or isinstance(dcn_cfg, dict) self.dcn_cfg = dcn_cfg super(DecoupledSOLOLightHead, self).__init__( *args, init_cfg=init_cfg, **kwargs) def _init_layers(self): self.mask_convs = nn.ModuleList() self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): if self.dcn_cfg is not None\ and i == self.stacked_convs - 1: conv_cfg = self.dcn_cfg else: conv_cfg = None chn = self.in_channels + 2 if i == 0 else self.feat_channels self.mask_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg)) chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg)) self.conv_mask_list_x = nn.ModuleList() self.conv_mask_list_y = nn.ModuleList() for num_grid in self.num_grids: self.conv_mask_list_x.append( nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) self.conv_mask_list_y.append( nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) def forward(self, feats): assert len(feats) == self.num_levels feats = self.resize_feats(feats) mask_preds_x = [] mask_preds_y = [] cls_preds = [] for i in range(self.num_levels): x = feats[i] mask_feat = x cls_feat = x # generate and concat the coordinate coord_feat = generate_coordinate(mask_feat.size(), mask_feat.device) mask_feat = torch.cat([mask_feat, coord_feat], 1) for mask_layer in self.mask_convs: mask_feat = mask_layer(mask_feat) mask_feat = F.interpolate( mask_feat, scale_factor=2, mode='bilinear') mask_pred_x = self.conv_mask_list_x[i](mask_feat) mask_pred_y = self.conv_mask_list_y[i](mask_feat) # cls branch for j, cls_layer in enumerate(self.cls_convs): if j == self.cls_down_index: num_grid = self.num_grids[i] cls_feat = F.interpolate( cls_feat, size=num_grid, mode='bilinear') cls_feat = cls_layer(cls_feat) cls_pred = self.conv_cls(cls_feat) if not self.training: feat_wh = feats[0].size()[-2:] upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) mask_pred_x = F.interpolate( mask_pred_x.sigmoid(), size=upsampled_size, mode='bilinear') mask_pred_y = F.interpolate( mask_pred_y.sigmoid(), size=upsampled_size, mode='bilinear') cls_pred = cls_pred.sigmoid() # get local maximum local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) keep_mask = local_max[:, :, :-1, :-1] == cls_pred cls_pred = cls_pred * keep_mask mask_preds_x.append(mask_pred_x) mask_preds_y.append(mask_pred_y) cls_preds.append(cls_pred) return mask_preds_x, mask_preds_y, cls_preds ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/solov2_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
import warnings import mmcv import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, auto_fp16, force_fp32 from mmdet.core import InstanceData, mask_matrix_nms, multi_apply from mmdet.core.utils import center_of_mass, generate_coordinate from mmdet.models.builder import HEADS from mmdet.utils.misc import floordiv from .solo_head import SOLOHead class MaskFeatModule(BaseModule): """SOLOv2 mask feature map branch used in `SOLOv2: Dynamic and Fast Instance Segmentation. `_ Args: in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels of the mask feature map branch. start_level (int): The starting feature map level from RPN that will be used to predict the mask feature map. end_level (int): The ending feature map level from rpn that will be used to predict the mask feature map. out_channels (int): Number of output channels of the mask feature map branch. This is the channel count of the mask feature map that to be dynamically convolved with the predicted kernel. mask_stride (int): Downsample factor of the mask feature map output. Default: 4. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Config dict for normalization layer. Default: None. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, in_channels, feat_channels, start_level, end_level, out_channels, mask_stride=4, conv_cfg=None, norm_cfg=None, init_cfg=[dict(type='Normal', layer='Conv2d', std=0.01)]): super().__init__(init_cfg=init_cfg) self.in_channels = in_channels self.feat_channels = feat_channels self.start_level = start_level self.end_level = end_level self.mask_stride = mask_stride assert start_level >= 0 and end_level >= start_level self.out_channels = out_channels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self._init_layers() self.fp16_enabled = False def _init_layers(self): self.convs_all_levels = nn.ModuleList() for i in range(self.start_level, self.end_level + 1): convs_per_level = nn.Sequential() if i == 0: convs_per_level.add_module( f'conv{i}', ConvModule( self.in_channels, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, inplace=False)) self.convs_all_levels.append(convs_per_level) continue for j in range(i): if j == 0: if i == self.end_level: chn = self.in_channels + 2 else: chn = self.in_channels convs_per_level.add_module( f'conv{j}', ConvModule( chn, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, inplace=False)) convs_per_level.add_module( f'upsample{j}', nn.Upsample( scale_factor=2, mode='bilinear', align_corners=False)) continue convs_per_level.add_module( f'conv{j}', ConvModule( self.feat_channels, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, inplace=False)) convs_per_level.add_module( f'upsample{j}', nn.Upsample( scale_factor=2, mode='bilinear', align_corners=False)) self.convs_all_levels.append(convs_per_level) self.conv_pred = ConvModule( self.feat_channels, self.out_channels, 1, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) @auto_fp16() def forward(self, feats): inputs = feats[self.start_level:self.end_level + 1] assert len(inputs) == (self.end_level - self.start_level + 1) feature_add_all_level = self.convs_all_levels[0](inputs[0]) for i in range(1, len(inputs)): input_p = inputs[i] if i == len(inputs) - 1: coord_feat = generate_coordinate(input_p.size(), 
input_p.device) input_p = torch.cat([input_p, coord_feat], 1) # fix runtime error of "+=" inplace operation in PyTorch 1.10 feature_add_all_level = feature_add_all_level + \ self.convs_all_levels[i](input_p) feature_pred = self.conv_pred(feature_add_all_level) return feature_pred @HEADS.register_module() class SOLOV2Head(SOLOHead): """SOLOv2 mask head used in `SOLOv2: Dynamic and Fast Instance Segmentation. `_ Args: mask_feature_head (dict): Config of SOLOv2MaskFeatHead. dynamic_conv_size (int): Dynamic Conv kernel size. Default: 1. dcn_cfg (dict): Dcn conv configurations in kernel_convs and cls_conv. default: None. dcn_apply_to_all_conv (bool): Whether to use dcn in every layer of kernel_convs and cls_convs, or only the last layer. It shall be set `True` for the normal version of SOLOv2 and `False` for the light-weight version. default: True. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, *args, mask_feature_head, dynamic_conv_size=1, dcn_cfg=None, dcn_apply_to_all_conv=True, init_cfg=[ dict(type='Normal', layer='Conv2d', std=0.01), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_cls')) ], **kwargs): assert dcn_cfg is None or isinstance(dcn_cfg, dict) self.dcn_cfg = dcn_cfg self.with_dcn = dcn_cfg is not None self.dcn_apply_to_all_conv = dcn_apply_to_all_conv self.dynamic_conv_size = dynamic_conv_size mask_out_channels = mask_feature_head.get('out_channels') self.kernel_out_channels = \ mask_out_channels * self.dynamic_conv_size * self.dynamic_conv_size super().__init__(*args, init_cfg=init_cfg, **kwargs) # update the in_channels of mask_feature_head if mask_feature_head.get('in_channels', None) is not None: if mask_feature_head.in_channels != self.in_channels: warnings.warn('The `in_channels` of SOLOv2MaskFeatHead and ' 'SOLOv2Head should be same, changing ' 'mask_feature_head.in_channels to ' f'{self.in_channels}') mask_feature_head.update(in_channels=self.in_channels) else: mask_feature_head.update(in_channels=self.in_channels) self.mask_feature_head = MaskFeatModule(**mask_feature_head) self.mask_stride = self.mask_feature_head.mask_stride self.fp16_enabled = False def _init_layers(self): self.cls_convs = nn.ModuleList() self.kernel_convs = nn.ModuleList() conv_cfg = None for i in range(self.stacked_convs): if self.with_dcn: if self.dcn_apply_to_all_conv: conv_cfg = self.dcn_cfg elif i == self.stacked_convs - 1: # light head conv_cfg = self.dcn_cfg chn = self.in_channels + 2 if i == 0 else self.feat_channels self.kernel_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.conv_kernel = nn.Conv2d( self.feat_channels, self.kernel_out_channels, 3, padding=1) @auto_fp16() def forward(self, feats): assert len(feats) == self.num_levels mask_feats = self.mask_feature_head(feats) feats = self.resize_feats(feats) mlvl_kernel_preds = [] mlvl_cls_preds = [] for i in range(self.num_levels): ins_kernel_feat = feats[i] # ins branch # concat coord coord_feat = generate_coordinate(ins_kernel_feat.size(), ins_kernel_feat.device) ins_kernel_feat = torch.cat([ins_kernel_feat, coord_feat], 1) # kernel branch kernel_feat = ins_kernel_feat 
kernel_feat = F.interpolate( kernel_feat, size=self.num_grids[i], mode='bilinear', align_corners=False) cate_feat = kernel_feat[:, :-2, :, :] kernel_feat = kernel_feat.contiguous() for i, kernel_conv in enumerate(self.kernel_convs): kernel_feat = kernel_conv(kernel_feat) kernel_pred = self.conv_kernel(kernel_feat) # cate branch cate_feat = cate_feat.contiguous() for i, cls_conv in enumerate(self.cls_convs): cate_feat = cls_conv(cate_feat) cate_pred = self.conv_cls(cate_feat) mlvl_kernel_preds.append(kernel_pred) mlvl_cls_preds.append(cate_pred) return mlvl_kernel_preds, mlvl_cls_preds, mask_feats def _get_targets_single(self, gt_bboxes, gt_labels, gt_masks, featmap_size=None): """Compute targets for predictions of single image. Args: gt_bboxes (Tensor): Ground truth bbox of each instance, shape (num_gts, 4). gt_labels (Tensor): Ground truth label of each instance, shape (num_gts,). gt_masks (Tensor): Ground truth mask of each instance, shape (num_gts, h, w). featmap_sizes (:obj:`torch.size`): Size of UNified mask feature map used to generate instance segmentation masks by dynamic convolution, each element means (feat_h, feat_w). Default: None. Returns: Tuple: Usually returns a tuple containing targets for predictions. - mlvl_pos_mask_targets (list[Tensor]): Each element represent the binary mask targets for positive points in this level, has shape (num_pos, out_h, out_w). - mlvl_labels (list[Tensor]): Each element is classification labels for all points in this level, has shape (num_grid, num_grid). - mlvl_pos_masks (list[Tensor]): Each element is a `BoolTensor` to represent whether the corresponding point in single level is positive, has shape (num_grid **2). - mlvl_pos_indexes (list[list]): Each element in the list contains the positive index in corresponding level, has shape (num_pos). """ device = gt_labels.device gt_areas = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (gt_bboxes[:, 3] - gt_bboxes[:, 1])) mlvl_pos_mask_targets = [] mlvl_pos_indexes = [] mlvl_labels = [] mlvl_pos_masks = [] for (lower_bound, upper_bound), num_grid \ in zip(self.scale_ranges, self.num_grids): mask_target = [] # FG cat_id: [0, num_classes -1], BG cat_id: num_classes pos_index = [] labels = torch.zeros([num_grid, num_grid], dtype=torch.int64, device=device) + self.num_classes pos_mask = torch.zeros([num_grid**2], dtype=torch.bool, device=device) gt_inds = ((gt_areas >= lower_bound) & (gt_areas <= upper_bound)).nonzero().flatten() if len(gt_inds) == 0: mlvl_pos_mask_targets.append( torch.zeros([0, featmap_size[0], featmap_size[1]], dtype=torch.uint8, device=device)) mlvl_labels.append(labels) mlvl_pos_masks.append(pos_mask) mlvl_pos_indexes.append([]) continue hit_gt_bboxes = gt_bboxes[gt_inds] hit_gt_labels = gt_labels[gt_inds] hit_gt_masks = gt_masks[gt_inds, ...] pos_w_ranges = 0.5 * (hit_gt_bboxes[:, 2] - hit_gt_bboxes[:, 0]) * self.pos_scale pos_h_ranges = 0.5 * (hit_gt_bboxes[:, 3] - hit_gt_bboxes[:, 1]) * self.pos_scale # Make sure hit_gt_masks has a value valid_mask_flags = hit_gt_masks.sum(dim=-1).sum(dim=-1) > 0 for gt_mask, gt_label, pos_h_range, pos_w_range, \ valid_mask_flag in \ zip(hit_gt_masks, hit_gt_labels, pos_h_ranges, pos_w_ranges, valid_mask_flags): if not valid_mask_flag: continue upsampled_size = (featmap_size[0] * self.mask_stride, featmap_size[1] * self.mask_stride) center_h, center_w = center_of_mass(gt_mask) coord_w = int( floordiv((center_w / upsampled_size[1]), (1. / num_grid), rounding_mode='trunc')) coord_h = int( floordiv((center_h / upsampled_size[0]), (1. 
/ num_grid), rounding_mode='trunc')) # left, top, right, down top_box = max( 0, int( floordiv( (center_h - pos_h_range) / upsampled_size[0], (1. / num_grid), rounding_mode='trunc'))) down_box = min( num_grid - 1, int( floordiv( (center_h + pos_h_range) / upsampled_size[0], (1. / num_grid), rounding_mode='trunc'))) left_box = max( 0, int( floordiv( (center_w - pos_w_range) / upsampled_size[1], (1. / num_grid), rounding_mode='trunc'))) right_box = min( num_grid - 1, int( floordiv( (center_w + pos_w_range) / upsampled_size[1], (1. / num_grid), rounding_mode='trunc'))) top = max(top_box, coord_h - 1) down = min(down_box, coord_h + 1) left = max(coord_w - 1, left_box) right = min(right_box, coord_w + 1) labels[top:(down + 1), left:(right + 1)] = gt_label # ins gt_mask = np.uint8(gt_mask.cpu().numpy()) # Follow the original implementation, F.interpolate is # different from cv2 and opencv gt_mask = mmcv.imrescale(gt_mask, scale=1. / self.mask_stride) gt_mask = torch.from_numpy(gt_mask).to(device=device) for i in range(top, down + 1): for j in range(left, right + 1): index = int(i * num_grid + j) this_mask_target = torch.zeros( [featmap_size[0], featmap_size[1]], dtype=torch.uint8, device=device) this_mask_target[:gt_mask.shape[0], :gt_mask. shape[1]] = gt_mask mask_target.append(this_mask_target) pos_mask[index] = True pos_index.append(index) if len(mask_target) == 0: mask_target = torch.zeros( [0, featmap_size[0], featmap_size[1]], dtype=torch.uint8, device=device) else: mask_target = torch.stack(mask_target, 0) mlvl_pos_mask_targets.append(mask_target) mlvl_labels.append(labels) mlvl_pos_masks.append(pos_mask) mlvl_pos_indexes.append(pos_index) return (mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks, mlvl_pos_indexes) @force_fp32(apply_to=('mlvl_kernel_preds', 'mlvl_cls_preds', 'mask_feats')) def loss(self, mlvl_kernel_preds, mlvl_cls_preds, mask_feats, gt_labels, gt_masks, img_metas, gt_bboxes=None, **kwargs): """Calculate the loss of total batch. Args: mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel prediction. The kernel is used to generate instance segmentation masks by dynamic convolution. Each element in the list has shape (batch_size, kernel_out_channels, num_grids, num_grids). mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids, num_grids). mask_feats (Tensor): Unified mask feature map used to generate instance segmentation masks by dynamic convolution. Has shape (batch_size, mask_out_channels, h, w). gt_labels (list[Tensor]): Labels of multiple images. gt_masks (list[Tensor]): Ground truth masks of multiple images. Each has shape (num_instances, h, w). img_metas (list[dict]): Meta information of multiple images. gt_bboxes (list[Tensor]): Ground truth bboxes of multiple images. Default: None. Returns: dict[str, Tensor]: A dictionary of loss components. 
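
        Example:
            A sketch (illustrative shapes) of the dynamic convolution this
            loss is built around: each positive grid cell predicts a kernel
            that is convolved with the unified mask features:

            >>> import torch
            >>> import torch.nn.functional as F
            >>> mask_out_channels, h, w = 128, 64, 64
            >>> mask_feats = torch.rand(1, mask_out_channels, h, w)
            >>> num_pos, dynamic_conv_size = 5, 1
            >>> kernels = torch.rand(num_pos, mask_out_channels,
            ...                      dynamic_conv_size, dynamic_conv_size)
            >>> F.conv2d(mask_feats, kernels, stride=1).shape  # one mask each
            torch.Size([1, 5, 64, 64])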
""" featmap_size = mask_feats.size()[-2:] pos_mask_targets, labels, pos_masks, pos_indexes = multi_apply( self._get_targets_single, gt_bboxes, gt_labels, gt_masks, featmap_size=featmap_size) mlvl_mask_targets = [ torch.cat(lvl_mask_targets, 0) for lvl_mask_targets in zip(*pos_mask_targets) ] mlvl_pos_kernel_preds = [] for lvl_kernel_preds, lvl_pos_indexes in zip(mlvl_kernel_preds, zip(*pos_indexes)): lvl_pos_kernel_preds = [] for img_lvl_kernel_preds, img_lvl_pos_indexes in zip( lvl_kernel_preds, lvl_pos_indexes): img_lvl_pos_kernel_preds = img_lvl_kernel_preds.view( img_lvl_kernel_preds.shape[0], -1)[:, img_lvl_pos_indexes] lvl_pos_kernel_preds.append(img_lvl_pos_kernel_preds) mlvl_pos_kernel_preds.append(lvl_pos_kernel_preds) # make multilevel mlvl_mask_pred mlvl_mask_preds = [] for lvl_pos_kernel_preds in mlvl_pos_kernel_preds: lvl_mask_preds = [] for img_id, img_lvl_pos_kernel_pred in enumerate( lvl_pos_kernel_preds): if img_lvl_pos_kernel_pred.size()[-1] == 0: continue img_mask_feats = mask_feats[[img_id]] h, w = img_mask_feats.shape[-2:] num_kernel = img_lvl_pos_kernel_pred.shape[1] img_lvl_mask_pred = F.conv2d( img_mask_feats, img_lvl_pos_kernel_pred.permute(1, 0).view( num_kernel, -1, self.dynamic_conv_size, self.dynamic_conv_size), stride=1).view(-1, h, w) lvl_mask_preds.append(img_lvl_mask_pred) if len(lvl_mask_preds) == 0: lvl_mask_preds = None else: lvl_mask_preds = torch.cat(lvl_mask_preds, 0) mlvl_mask_preds.append(lvl_mask_preds) # dice loss num_pos = 0 for img_pos_masks in pos_masks: for lvl_img_pos_masks in img_pos_masks: num_pos += lvl_img_pos_masks.count_nonzero() loss_mask = [] for lvl_mask_preds, lvl_mask_targets in zip(mlvl_mask_preds, mlvl_mask_targets): if lvl_mask_preds is None: continue loss_mask.append( self.loss_mask( lvl_mask_preds, lvl_mask_targets, reduction_override='none')) if num_pos > 0: loss_mask = torch.cat(loss_mask).sum() / num_pos else: loss_mask = mask_feats.sum() * 0 # cate flatten_labels = [ torch.cat( [img_lvl_labels.flatten() for img_lvl_labels in lvl_labels]) for lvl_labels in zip(*labels) ] flatten_labels = torch.cat(flatten_labels) flatten_cls_preds = [ lvl_cls_preds.permute(0, 2, 3, 1).reshape(-1, self.num_classes) for lvl_cls_preds in mlvl_cls_preds ] flatten_cls_preds = torch.cat(flatten_cls_preds) loss_cls = self.loss_cls( flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1) return dict(loss_mask=loss_mask, loss_cls=loss_cls) @force_fp32( apply_to=('mlvl_kernel_preds', 'mlvl_cls_scores', 'mask_feats')) def get_results(self, mlvl_kernel_preds, mlvl_cls_scores, mask_feats, img_metas, **kwargs): """Get multi-image mask results. Args: mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel prediction. The kernel is used to generate instance segmentation masks by dynamic convolution. Each element in the list has shape (batch_size, kernel_out_channels, num_grids, num_grids). mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids, num_grids). mask_feats (Tensor): Unified mask feature map used to generate instance segmentation masks by dynamic convolution. Has shape (batch_size, mask_out_channels, h, w). img_metas (list[dict]): Meta information of all images. Returns: list[:obj:`InstanceData`]: Processed results of multiple images.Each :obj:`InstanceData` usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). 
- masks (Tensor): Processed mask results, has shape (num_instances, h, w). """ num_levels = len(mlvl_cls_scores) assert len(mlvl_kernel_preds) == len(mlvl_cls_scores) for lvl in range(num_levels): cls_scores = mlvl_cls_scores[lvl] cls_scores = cls_scores.sigmoid() local_max = F.max_pool2d(cls_scores, 2, stride=1, padding=1) keep_mask = local_max[:, :, :-1, :-1] == cls_scores cls_scores = cls_scores * keep_mask mlvl_cls_scores[lvl] = cls_scores.permute(0, 2, 3, 1) result_list = [] for img_id in range(len(img_metas)): img_cls_pred = [ mlvl_cls_scores[lvl][img_id].view(-1, self.cls_out_channels) for lvl in range(num_levels) ] img_mask_feats = mask_feats[[img_id]] img_kernel_pred = [ mlvl_kernel_preds[lvl][img_id].permute(1, 2, 0).view( -1, self.kernel_out_channels) for lvl in range(num_levels) ] img_cls_pred = torch.cat(img_cls_pred, dim=0) img_kernel_pred = torch.cat(img_kernel_pred, dim=0) result = self._get_results_single( img_kernel_pred, img_cls_pred, img_mask_feats, img_meta=img_metas[img_id]) result_list.append(result) return result_list def _get_results_single(self, kernel_preds, cls_scores, mask_feats, img_meta, cfg=None): """Get processed mask related results of single image. Args: kernel_preds (Tensor): Dynamic kernel prediction of all points in single image, has shape (num_points, kernel_out_channels). cls_scores (Tensor): Classification score of all points in single image, has shape (num_points, num_classes). mask_preds (Tensor): Mask prediction of all points in single image, has shape (num_points, feat_h, feat_w). img_meta (dict): Meta information of corresponding image. cfg (dict, optional): Config used in test phase. Default: None. Returns: :obj:`InstanceData`: Processed results of single image. it usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). """ def empty_results(results, cls_scores): """Generate a empty results.""" results.scores = cls_scores.new_ones(0) results.masks = cls_scores.new_zeros(0, *results.ori_shape[:2]) results.labels = cls_scores.new_ones(0) return results cfg = self.test_cfg if cfg is None else cfg assert len(kernel_preds) == len(cls_scores) results = InstanceData(img_meta) featmap_size = mask_feats.size()[-2:] img_shape = results.img_shape ori_shape = results.ori_shape # overall info h, w, _ = img_shape upsampled_size = (featmap_size[0] * self.mask_stride, featmap_size[1] * self.mask_stride) # process. score_mask = (cls_scores > cfg.score_thr) cls_scores = cls_scores[score_mask] if len(cls_scores) == 0: return empty_results(results, cls_scores) # cate_labels & kernel_preds inds = score_mask.nonzero() cls_labels = inds[:, 1] kernel_preds = kernel_preds[inds[:, 0]] # trans vector. lvl_interval = cls_labels.new_tensor(self.num_grids).pow(2).cumsum(0) strides = kernel_preds.new_ones(lvl_interval[-1]) strides[:lvl_interval[0]] *= self.strides[0] for lvl in range(1, self.num_levels): strides[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= self.strides[lvl] strides = strides[inds[:, 0]] # mask encoding. kernel_preds = kernel_preds.view( kernel_preds.size(0), -1, self.dynamic_conv_size, self.dynamic_conv_size) mask_preds = F.conv2d( mask_feats, kernel_preds, stride=1).squeeze(0).sigmoid() # mask. 
masks = mask_preds > cfg.mask_thr sum_masks = masks.sum((1, 2)).float() keep = sum_masks > strides if keep.sum() == 0: return empty_results(results, cls_scores) masks = masks[keep] mask_preds = mask_preds[keep] sum_masks = sum_masks[keep] cls_scores = cls_scores[keep] cls_labels = cls_labels[keep] # maskness. mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks cls_scores *= mask_scores scores, labels, _, keep_inds = mask_matrix_nms( masks, cls_labels, cls_scores, mask_area=sum_masks, nms_pre=cfg.nms_pre, max_num=cfg.max_per_img, kernel=cfg.kernel, sigma=cfg.sigma, filter_thr=cfg.filter_thr) mask_preds = mask_preds[keep_inds] mask_preds = F.interpolate( mask_preds.unsqueeze(0), size=upsampled_size, mode='bilinear', align_corners=False)[:, :, :h, :w] mask_preds = F.interpolate( mask_preds, size=ori_shape[:2], mode='bilinear', align_corners=False).squeeze(0) masks = mask_preds > cfg.mask_thr results.masks = masks results.labels = labels results.scores = scores return results ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/ssd_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule from mmcv.runner import force_fp32 from mmdet.core import (build_assigner, build_bbox_coder, build_prior_generator, build_sampler, multi_apply) from ..builder import HEADS from ..losses import smooth_l1_loss from .anchor_head import AnchorHead # TODO: add loss evaluator for SSD @HEADS.register_module() class SSDHead(AnchorHead): """SSD head used in https://arxiv.org/abs/1512.02325. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of conv layers in cls and reg tower. Default: 0. feat_channels (int): Number of hidden channels when stacked_convs > 0. Default: 256. use_depthwise (bool): Whether to use DepthwiseSeparableConv. Default: False. conv_cfg (dict): Dictionary to construct and config conv layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Default: None. act_cfg (dict): Dictionary to construct and config activation layer. Default: None. anchor_generator (dict): Config dict for anchor generator bbox_coder (dict): Config of bounding box coder. reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Default False. It should be `True` when using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. train_cfg (dict): Training config of anchor head. test_cfg (dict): Testing config of anchor head. init_cfg (dict or list[dict], optional): Initialization config dict. 
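
    Example:
        A sketch of the per-level output layout (80 COCO classes plus the
        appended background class and 4 priors are assumed values):

        >>> import torch
        >>> import torch.nn as nn
        >>> num_classes, num_priors = 80, 4
        >>> cls_out_channels = num_classes + 1  # background is appended
        >>> feat = torch.rand(1, 512, 38, 38)
        >>> cls_conv = nn.Conv2d(512, num_priors * cls_out_channels, 3,
        ...                      padding=1)
        >>> reg_conv = nn.Conv2d(512, num_priors * 4, 3, padding=1)
        >>> cls_conv(feat).shape
        torch.Size([1, 324, 38, 38])
        >>> reg_conv(feat).shape
        torch.Size([1, 16, 38, 38])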
""" # noqa: W605 def __init__(self, num_classes=80, in_channels=(512, 1024, 512, 256, 256, 256), stacked_convs=0, feat_channels=256, use_depthwise=False, conv_cfg=None, norm_cfg=None, act_cfg=None, anchor_generator=dict( type='SSDAnchorGenerator', scale_major=False, input_size=300, strides=[8, 16, 32, 64, 100, 300], ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]), basesize_ratio_range=(0.1, 0.9)), bbox_coder=dict( type='DeltaXYWHBBoxCoder', clip_border=True, target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0], ), reg_decoded_bbox=False, train_cfg=None, test_cfg=None, init_cfg=dict( type='Xavier', layer='Conv2d', distribution='uniform', bias=0)): super(AnchorHead, self).__init__(init_cfg) self.num_classes = num_classes self.in_channels = in_channels self.stacked_convs = stacked_convs self.feat_channels = feat_channels self.use_depthwise = use_depthwise self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.cls_out_channels = num_classes + 1 # add background class self.prior_generator = build_prior_generator(anchor_generator) # Usually the numbers of anchors for each level are the same # except SSD detectors. So it is an int in the most dense # heads but a list of int in SSDHead self.num_base_priors = self.prior_generator.num_base_priors self._init_layers() self.bbox_coder = build_bbox_coder(bbox_coder) self.reg_decoded_bbox = reg_decoded_bbox self.use_sigmoid_cls = False self.cls_focal_loss = False self.train_cfg = train_cfg self.test_cfg = test_cfg # set sampling=False for archor_target self.sampling = False if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # SSD sampling=False so use PseudoSampler sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.fp16_enabled = False @property def num_anchors(self): """ Returns: list[int]: Number of base_anchors on each point of each level. 
""" warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' 'please use "num_base_priors" instead') return self.num_base_priors def _init_layers(self): """Initialize layers of the head.""" self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() # TODO: Use registry to choose ConvModule type conv = DepthwiseSeparableConvModule \ if self.use_depthwise else ConvModule for channel, num_base_priors in zip(self.in_channels, self.num_base_priors): cls_layers = [] reg_layers = [] in_channel = channel # build stacked conv tower, not used in default ssd for i in range(self.stacked_convs): cls_layers.append( conv( in_channel, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) reg_layers.append( conv( in_channel, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) in_channel = self.feat_channels # SSD-Lite head if self.use_depthwise: cls_layers.append( ConvModule( in_channel, in_channel, 3, padding=1, groups=in_channel, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) reg_layers.append( ConvModule( in_channel, in_channel, 3, padding=1, groups=in_channel, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) cls_layers.append( nn.Conv2d( in_channel, num_base_priors * self.cls_out_channels, kernel_size=1 if self.use_depthwise else 3, padding=0 if self.use_depthwise else 1)) reg_layers.append( nn.Conv2d( in_channel, num_base_priors * 4, kernel_size=1 if self.use_depthwise else 3, padding=0 if self.use_depthwise else 1)) self.cls_convs.append(nn.Sequential(*cls_layers)) self.reg_convs.append(nn.Sequential(*reg_layers)) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_anchors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_anchors * 4. """ cls_scores = [] bbox_preds = [] for feat, reg_conv, cls_conv in zip(feats, self.reg_convs, self.cls_convs): cls_scores.append(cls_conv(feat)) bbox_preds.append(reg_conv(feat)) return cls_scores, bbox_preds def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights, bbox_targets, bbox_weights, num_total_samples): """Compute loss of a single image. Args: cls_score (Tensor): Box scores for eachimage Has shape (num_total_anchors, num_classes). bbox_pred (Tensor): Box energies / deltas for each image level with shape (num_total_anchors, 4). anchors (Tensor): Box reference for each scale level with shape (num_total_anchors, 4). labels (Tensor): Labels of each anchors with shape (num_total_anchors,). label_weights (Tensor): Label weights of each anchor with shape (num_total_anchors,) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (num_total_anchors, 4). bbox_weights (Tensor): BBox regression loss weights of each anchor with shape (num_total_anchors, 4). num_total_samples (int): If sampling, num total samples equal to the number of total anchors; Otherwise, it is the number of positive anchors. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" loss_cls_all = F.cross_entropy( cls_score, labels, reduction='none') * label_weights # FG cat_id: [0, num_classes -1], BG cat_id: num_classes pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero( as_tuple=False).reshape(-1) neg_inds = (labels == self.num_classes).nonzero( as_tuple=False).view(-1) num_pos_samples = pos_inds.size(0) num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples if num_neg_samples > neg_inds.size(0): num_neg_samples = neg_inds.size(0) topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples) loss_cls_pos = loss_cls_all[pos_inds].sum() loss_cls_neg = topk_loss_cls_neg.sum() loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples if self.reg_decoded_bbox: # When the regression loss (e.g. `IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, it # decodes the already encoded coordinates to absolute format. bbox_pred = self.bbox_coder.decode(anchor, bbox_pred) loss_bbox = smooth_l1_loss( bbox_pred, bbox_targets, bbox_weights, beta=self.train_cfg.smoothl1_beta, avg_factor=num_total_samples) return loss_cls[None], loss_bbox @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=1, unmap_outputs=True) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_images = len(img_metas) all_cls_scores = torch.cat([ s.permute(0, 2, 3, 1).reshape( num_images, -1, self.cls_out_channels) for s in cls_scores ], 1) all_labels = torch.cat(labels_list, -1).view(num_images, -1) all_label_weights = torch.cat(label_weights_list, -1).view(num_images, -1) all_bbox_preds = torch.cat([ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) for b in bbox_preds ], -2) all_bbox_targets = torch.cat(bbox_targets_list, -2).view(num_images, -1, 4) all_bbox_weights = torch.cat(bbox_weights_list, -2).view(num_images, -1, 4) # concat all level anchors to a single tensor all_anchors = [] for i in range(num_images): all_anchors.append(torch.cat(anchor_list[i])) losses_cls, losses_bbox = multi_apply( self.loss_single, all_cls_scores, all_bbox_preds, all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, num_total_samples=num_total_pos) return dict(loss_cls=losses_cls, loss_bbox=losses_bbox) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/tood_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init from mmcv.ops import deform_conv2d from mmcv.runner import force_fp32 from mmdet.core import (anchor_inside_flags, build_assigner, distance2bbox, images_to_levels, multi_apply, reduce_mean, unmap) from mmdet.core.utils import filter_scores_and_topk from mmdet.models.utils import sigmoid_geometric_mean from ..builder import HEADS, build_loss from .atss_head import ATSSHead class TaskDecomposition(nn.Module): """Task decomposition module in task-aligned predictor of TOOD. Args: feat_channels (int): Number of feature channels in TOOD head. stacked_convs (int): Number of conv layers in TOOD head. la_down_rate (int): Downsample rate of layer attention. conv_cfg (dict): Config dict for convolution layer. norm_cfg (dict): Config dict for normalization layer. 
""" def __init__(self, feat_channels, stacked_convs, la_down_rate=8, conv_cfg=None, norm_cfg=None): super(TaskDecomposition, self).__init__() self.feat_channels = feat_channels self.stacked_convs = stacked_convs self.in_channels = self.feat_channels * self.stacked_convs self.norm_cfg = norm_cfg self.layer_attention = nn.Sequential( nn.Conv2d(self.in_channels, self.in_channels // la_down_rate, 1), nn.ReLU(inplace=True), nn.Conv2d( self.in_channels // la_down_rate, self.stacked_convs, 1, padding=0), nn.Sigmoid()) self.reduction_conv = ConvModule( self.in_channels, self.feat_channels, 1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, bias=norm_cfg is None) def init_weights(self): for m in self.layer_attention.modules(): if isinstance(m, nn.Conv2d): normal_init(m, std=0.001) normal_init(self.reduction_conv.conv, std=0.01) def forward(self, feat, avg_feat=None): b, c, h, w = feat.shape if avg_feat is None: avg_feat = F.adaptive_avg_pool2d(feat, (1, 1)) weight = self.layer_attention(avg_feat) # here we first compute the product between layer attention weight and # conv weight, and then compute the convolution between new conv weight # and feature map, in order to save memory and FLOPs. conv_weight = weight.reshape( b, 1, self.stacked_convs, 1) * self.reduction_conv.conv.weight.reshape( 1, self.feat_channels, self.stacked_convs, self.feat_channels) conv_weight = conv_weight.reshape(b, self.feat_channels, self.in_channels) feat = feat.reshape(b, self.in_channels, h * w) feat = torch.bmm(conv_weight, feat).reshape(b, self.feat_channels, h, w) if self.norm_cfg is not None: feat = self.reduction_conv.norm(feat) feat = self.reduction_conv.activate(feat) return feat @HEADS.register_module() class TOODHead(ATSSHead): """TOODHead used in `TOOD: Task-aligned One-stage Object Detection. `_. TOOD uses Task-aligned head (T-head) and is optimized by Task Alignment Learning (TAL). Args: num_dcn (int): Number of deformable convolution in the head. Default: 0. anchor_type (str): If set to `anchor_free`, the head will use centers to regress bboxes. If set to `anchor_based`, the head will regress bboxes based on anchors. Default: `anchor_free`. initial_loss_cls (dict): Config of initial loss. Example: >>> self = TOODHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_score, bbox_pred = self.forward(feats) >>> assert len(cls_score) == len(self.scales) """ def __init__(self, num_classes, in_channels, num_dcn=0, anchor_type='anchor_free', initial_loss_cls=dict( type='FocalLoss', use_sigmoid=True, activated=True, gamma=2.0, alpha=0.25, loss_weight=1.0), **kwargs): assert anchor_type in ['anchor_free', 'anchor_based'] self.num_dcn = num_dcn self.anchor_type = anchor_type self.epoch = 0 # which would be update in SetEpochInfoHook! 
super(TOODHead, self).__init__(num_classes, in_channels, **kwargs) if self.train_cfg: self.initial_epoch = self.train_cfg.initial_epoch self.initial_assigner = build_assigner( self.train_cfg.initial_assigner) self.initial_loss_cls = build_loss(initial_loss_cls) self.assigner = self.initial_assigner self.alignment_assigner = build_assigner(self.train_cfg.assigner) self.alpha = self.train_cfg.alpha self.beta = self.train_cfg.beta def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.inter_convs = nn.ModuleList() for i in range(self.stacked_convs): if i < self.num_dcn: conv_cfg = dict(type='DCNv2', deform_groups=4) else: conv_cfg = self.conv_cfg chn = self.in_channels if i == 0 else self.feat_channels self.inter_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg)) self.cls_decomp = TaskDecomposition(self.feat_channels, self.stacked_convs, self.stacked_convs * 8, self.conv_cfg, self.norm_cfg) self.reg_decomp = TaskDecomposition(self.feat_channels, self.stacked_convs, self.stacked_convs * 8, self.conv_cfg, self.norm_cfg) self.tood_cls = nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.tood_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) self.cls_prob_module = nn.Sequential( nn.Conv2d(self.feat_channels * self.stacked_convs, self.feat_channels // 4, 1), nn.ReLU(inplace=True), nn.Conv2d(self.feat_channels // 4, 1, 3, padding=1)) self.reg_offset_module = nn.Sequential( nn.Conv2d(self.feat_channels * self.stacked_convs, self.feat_channels // 4, 1), nn.ReLU(inplace=True), nn.Conv2d(self.feat_channels // 4, 4 * 2, 3, padding=1)) self.scales = nn.ModuleList( [Scale(1.0) for _ in self.prior_generator.strides]) def init_weights(self): """Initialize weights of the head.""" bias_cls = bias_init_with_prob(0.01) for m in self.inter_convs: normal_init(m.conv, std=0.01) for m in self.cls_prob_module: if isinstance(m, nn.Conv2d): normal_init(m, std=0.01) for m in self.reg_offset_module: if isinstance(m, nn.Conv2d): normal_init(m, std=0.001) normal_init(self.cls_prob_module[-1], std=0.01, bias=bias_cls) self.cls_decomp.init_weights() self.reg_decomp.init_weights() normal_init(self.tood_cls, std=0.01, bias=bias_cls) normal_init(self.tood_reg, std=0.01) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_anchors * num_classes. bbox_preds (list[Tensor]): Decoded box for all scale levels, each is a 4D-tensor, the channels number is num_anchors * 4. In [tl_x, tl_y, br_x, br_y] format. 
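
        Note:
            The returned ``bbox_preds`` are decoded boxes at the
            feature-map scale (i.e. divided by the per-level stride);
            :meth:`loss` multiplies them by the stride again before
            computing the regression loss.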
""" cls_scores = [] bbox_preds = [] for idx, (x, scale, stride) in enumerate( zip(feats, self.scales, self.prior_generator.strides)): b, c, h, w = x.shape anchor = self.prior_generator.single_level_grid_priors( (h, w), idx, device=x.device) anchor = torch.cat([anchor for _ in range(b)]) # extract task interactive features inter_feats = [] for inter_conv in self.inter_convs: x = inter_conv(x) inter_feats.append(x) feat = torch.cat(inter_feats, 1) # task decomposition avg_feat = F.adaptive_avg_pool2d(feat, (1, 1)) cls_feat = self.cls_decomp(feat, avg_feat) reg_feat = self.reg_decomp(feat, avg_feat) # cls prediction and alignment cls_logits = self.tood_cls(cls_feat) cls_prob = self.cls_prob_module(feat) cls_score = sigmoid_geometric_mean(cls_logits, cls_prob) # reg prediction and alignment if self.anchor_type == 'anchor_free': reg_dist = scale(self.tood_reg(reg_feat).exp()).float() reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4) reg_bbox = distance2bbox( self.anchor_center(anchor) / stride[0], reg_dist).reshape(b, h, w, 4).permute(0, 3, 1, 2) # (b, c, h, w) elif self.anchor_type == 'anchor_based': reg_dist = scale(self.tood_reg(reg_feat)).float() reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4) reg_bbox = self.bbox_coder.decode(anchor, reg_dist).reshape( b, h, w, 4).permute(0, 3, 1, 2) / stride[0] else: raise NotImplementedError( f'Unknown anchor type: {self.anchor_type}.' f'Please use `anchor_free` or `anchor_based`.') reg_offset = self.reg_offset_module(feat) bbox_pred = self.deform_sampling(reg_bbox.contiguous(), reg_offset.contiguous()) # After deform_sampling, some boxes will become invalid (The # left-top point is at the right or bottom of the right-bottom # point), which will make the GIoULoss negative. invalid_bbox_idx = (bbox_pred[:, [0]] > bbox_pred[:, [2]]) | \ (bbox_pred[:, [1]] > bbox_pred[:, [3]]) invalid_bbox_idx = invalid_bbox_idx.expand_as(bbox_pred) bbox_pred = torch.where(invalid_bbox_idx, reg_bbox, bbox_pred) cls_scores.append(cls_score) bbox_preds.append(bbox_pred) return tuple(cls_scores), tuple(bbox_preds) def deform_sampling(self, feat, offset): """Sampling the feature x according to offset. Args: feat (Tensor): Feature offset (Tensor): Spatial offset for feature sampling """ # it is an equivalent implementation of bilinear interpolation b, c, h, w = feat.shape weight = feat.new_ones(c, 1, 1, 1) y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c) return y def anchor_center(self, anchors): """Get anchor centers from anchors. Args: anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format. Returns: Tensor: Anchor centers with shape (N, 2), "xy" format. """ anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2 anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2 return torch.stack([anchors_cx, anchors_cy], dim=-1) def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights, bbox_targets, alignment_metrics, stride): """Compute loss of a single scale level. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). cls_score (Tensor): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W). bbox_pred (Tensor): Decoded bboxes for each scale level with shape (N, num_anchors * 4, H, W). labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors). bbox_targets (Tensor): BBox regression targets of each anchor with shape (N, num_total_anchors, 4). 
alignment_metrics (Tensor): Alignment metrics with shape (N, num_total_anchors). stride (tuple[int]): Downsample stride of the feature map. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert stride[0] == stride[1], 'h stride is not equal to w stride!' anchors = anchors.reshape(-1, 4) cls_score = cls_score.permute(0, 2, 3, 1).reshape( -1, self.cls_out_channels).contiguous() bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) alignment_metrics = alignment_metrics.reshape(-1) label_weights = label_weights.reshape(-1) targets = labels if self.epoch < self.initial_epoch else ( labels, alignment_metrics) cls_loss_func = self.initial_loss_cls \ if self.epoch < self.initial_epoch else self.loss_cls loss_cls = cls_loss_func( cls_score, targets, label_weights, avg_factor=1.0) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_decode_bbox_pred = pos_bbox_pred pos_decode_bbox_targets = pos_bbox_targets / stride[0] # regression loss pos_bbox_weight = self.centerness_target( pos_anchors, pos_bbox_targets ) if self.epoch < self.initial_epoch else alignment_metrics[ pos_inds] loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_decode_bbox_targets, weight=pos_bbox_weight, avg_factor=1.0) else: loss_bbox = bbox_pred.sum() * 0 pos_bbox_weight = bbox_targets.new_tensor(0.) return loss_cls, loss_bbox, alignment_metrics.sum( ), pos_bbox_weight.sum() @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Decoded box for each scale level with shape (N, num_anchors * 4, H, W) in [tl_x, tl_y, br_x, br_y] format. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. 
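
        Note:
            Each loss term is divided by a distributed-mean average factor
            clamped to a minimum of 1: the summed alignment metrics for the
            classification loss and the summed positive bbox weights for the
            regression loss.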
""" num_imgs = len(img_metas) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 flatten_cls_scores = torch.cat([ cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.cls_out_channels) for cls_score in cls_scores ], 1) flatten_bbox_preds = torch.cat([ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) * stride[0] for bbox_pred, stride in zip(bbox_preds, self.prior_generator.strides) ], 1) cls_reg_targets = self.get_targets( flatten_cls_scores, flatten_bbox_preds, anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) (anchor_list, labels_list, label_weights_list, bbox_targets_list, alignment_metrics_list) = cls_reg_targets losses_cls, losses_bbox,\ cls_avg_factors, bbox_avg_factors = multi_apply( self.loss_single, anchor_list, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_targets_list, alignment_metrics_list, self.prior_generator.strides) cls_avg_factor = reduce_mean(sum(cls_avg_factors)).clamp_(min=1).item() losses_cls = list(map(lambda x: x / cls_avg_factor, losses_cls)) bbox_avg_factor = reduce_mean( sum(bbox_avg_factors)).clamp_(min=1).item() losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox)) return dict(loss_cls=losses_cls, loss_bbox=losses_bbox) def _get_bboxes_single(self, cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors, img_meta, cfg, rescale=False, with_nms=True, **kwargs): """Transform outputs of a single image into bbox predictions. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image, each item has shape (num_priors * 1, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid. In all anchor-based methods, it has shape (num_priors, 4). In all anchor-free methods, it has shape (num_priors, 2) when `with_stride=True`, otherwise it still has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually with_nms is False is used for aug test. If with_nms is True, then return the following format - det_bboxes (Tensor): Predicted bboxes with shape \ [num_bboxes, 5], where the first 4 columns are bounding \ box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ column are scores between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding \ box with shape [num_bboxes]. 
""" cfg = self.test_cfg if cfg is None else cfg nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_labels = [] for cls_score, bbox_pred, priors, stride in zip( cls_score_list, bbox_pred_list, mlvl_priors, self.prior_generator.strides): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) * stride[0] scores = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict(bbox_pred=bbox_pred, priors=priors)) scores, labels, keep_idxs, filtered_results = results bboxes = filtered_results['bbox_pred'] mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_labels.append(labels) return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes, img_meta['scale_factor'], cfg, rescale, with_nms, None, **kwargs) def get_targets(self, cls_scores, bbox_preds, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True): """Compute regression and classification targets for anchors in multiple images. Args: cls_scores (Tensor): Classification predictions of images, a 3D-Tensor with shape [num_imgs, num_priors, num_classes]. bbox_preds (Tensor): Decoded bboxes predictions of one image, a 3D-Tensor with shape [num_imgs, num_priors, 4] in [tl_x, tl_y, br_x, br_y] format. anchor_list (list[list[Tensor]]): Multi level anchors of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, 4). valid_flag_list (list[list[Tensor]]): Multi level valid flags of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, ) gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be ignored. gt_labels_list (list[Tensor]): Ground truth labels of each box. label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: a tuple containing learning targets. - anchors_list (list[list[Tensor]]): Anchors of each level. - labels_list (list[Tensor]): Labels of each level. - label_weights_list (list[Tensor]): Label weights of each level. - bbox_targets_list (list[Tensor]): BBox targets of each level. - norm_alignment_metrics_list (list[Tensor]): Normalized alignment metrics of each level. 
""" num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] num_level_anchors_list = [num_level_anchors] * num_imgs # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list[i] = torch.cat(anchor_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] # anchor_list: list(b * [-1, 4]) if self.epoch < self.initial_epoch: (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( super()._get_target_single, anchor_list, valid_flag_list, num_level_anchors_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs) all_assign_metrics = [ weight[..., 0] for weight in all_bbox_weights ] else: (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_assign_metrics) = multi_apply( self._get_target_single, cls_scores, bbox_preds, anchor_list, valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs) # no valid anchors if any([labels is None for labels in all_labels]): return None # split targets to a list w.r.t. multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors) labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) norm_alignment_metrics_list = images_to_levels(all_assign_metrics, num_level_anchors) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, norm_alignment_metrics_list) def _get_target_single(self, cls_scores, bbox_preds, flat_anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True): """Compute regression, classification targets for anchors in a single image. Args: cls_scores (list(Tensor)): Box scores for each image. bbox_preds (list(Tensor)): Box energies / deltas for each image. flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors ,4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors,). gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). img_meta (dict): Meta info of the image. label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: N is the number of total anchors in the image. anchors (Tensor): All anchors in the image with shape (N, 4). labels (Tensor): Labels of all anchors in the image with shape (N,). label_weights (Tensor): Label weights of all anchor in the image with shape (N,). bbox_targets (Tensor): BBox targets of all anchors in the image with shape (N, 4). norm_alignment_metrics (Tensor): Normalized alignment metrics of all priors in the image with shape (N,). 
""" inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) if not inside_flags.any(): return (None, ) * 7 # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] assign_result = self.alignment_assigner.assign( cls_scores[inside_flags, :], bbox_preds[inside_flags, :], anchors, gt_bboxes, gt_bboxes_ignore, gt_labels, self.alpha, self.beta) assign_ious = assign_result.max_overlaps assign_metrics = assign_result.assign_metrics sampling_result = self.sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) norm_alignment_metrics = anchors.new_zeros( num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: # point-based pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class since v2.5.0 labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 class_assigned_gt_inds = torch.unique( sampling_result.pos_assigned_gt_inds) for gt_inds in class_assigned_gt_inds: gt_class_inds = pos_inds[sampling_result.pos_assigned_gt_inds == gt_inds] pos_alignment_metrics = assign_metrics[gt_class_inds] pos_ious = assign_ious[gt_class_inds] pos_norm_alignment_metrics = pos_alignment_metrics / ( pos_alignment_metrics.max() + 10e-8) * pos_ious.max() norm_alignment_metrics[gt_class_inds] = pos_norm_alignment_metrics # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) anchors = unmap(anchors, num_total_anchors, inside_flags) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) norm_alignment_metrics = unmap(norm_alignment_metrics, num_total_anchors, inside_flags) return (anchors, labels, label_weights, bbox_targets, norm_alignment_metrics) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/vfnet_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings import numpy as np import torch import torch.nn as nn from mmcv.cnn import ConvModule, Scale from mmcv.ops import DeformConv2d from mmcv.runner import force_fp32 from mmdet.core import (MlvlPointGenerator, bbox_overlaps, build_assigner, build_prior_generator, build_sampler, multi_apply, reduce_mean) from ..builder import HEADS, build_loss from .atss_head import ATSSHead from .fcos_head import FCOSHead INF = 1e8 @HEADS.register_module() class VFNetHead(ATSSHead, FCOSHead): """Head of `VarifocalNet (VFNet): An IoU-aware Dense Object Detector.`_. The VFNet predicts IoU-aware classification scores which mix the object presence confidence and object localization accuracy as the detection score. It is built on the FCOS architecture and uses ATSS for defining positive/negative training examples. 
The VFNet is trained with Varifocal Loss and empolys star-shaped deformable convolution to extract features for a bbox. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. regress_ranges (tuple[tuple[int, int]]): Regress range of multiple level points. center_sampling (bool): If true, use center sampling. Default: False. center_sample_radius (float): Radius of center sampling. Default: 1.5. sync_num_pos (bool): If true, synchronize the number of positive examples across GPUs. Default: True gradient_mul (float): The multiplier to gradients from bbox refinement and recognition. Default: 0.1. bbox_norm_type (str): The bbox normalization type, 'reg_denom' or 'stride'. Default: reg_denom loss_cls_fl (dict): Config of focal loss. use_vfl (bool): If true, use varifocal loss for training. Default: True. loss_cls (dict): Config of varifocal loss. loss_bbox (dict): Config of localization loss, GIoU Loss. loss_bbox (dict): Config of localization refinement loss, GIoU Loss. norm_cfg (dict): dictionary to construct and config norm layer. Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). use_atss (bool): If true, use ATSS to define positive/negative examples. Default: True. anchor_generator (dict): Config of anchor generator for ATSS. init_cfg (dict or list[dict], optional): Initialization config dict. Example: >>> self = VFNetHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_score, bbox_pred, bbox_pred_refine= self.forward(feats) >>> assert len(cls_score) == len(self.scales) """ # noqa: E501 def __init__(self, num_classes, in_channels, regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512), (512, INF)), center_sampling=False, center_sample_radius=1.5, sync_num_pos=True, gradient_mul=0.1, bbox_norm_type='reg_denom', loss_cls_fl=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), use_vfl=True, loss_cls=dict( type='VarifocalLoss', use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.5), loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0), norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), use_atss=True, reg_decoded_bbox=True, anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, center_offset=0.0, strides=[8, 16, 32, 64, 128]), init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='vfnet_cls', std=0.01, bias_prob=0.01)), **kwargs): # dcn base offsets, adapted from reppoints_head.py self.num_dconv_points = 9 self.dcn_kernel = int(np.sqrt(self.num_dconv_points)) self.dcn_pad = int((self.dcn_kernel - 1) / 2) dcn_base = np.arange(-self.dcn_pad, self.dcn_pad + 1).astype(np.float64) dcn_base_y = np.repeat(dcn_base, self.dcn_kernel) dcn_base_x = np.tile(dcn_base, self.dcn_kernel) dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape( (-1)) self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1) super(FCOSHead, self).__init__( num_classes, in_channels, norm_cfg=norm_cfg, init_cfg=init_cfg, **kwargs) self.regress_ranges = regress_ranges self.reg_denoms = [ regress_range[-1] for regress_range in regress_ranges ] self.reg_denoms[-1] = self.reg_denoms[-2] * 2 self.center_sampling = center_sampling self.center_sample_radius = center_sample_radius self.sync_num_pos = sync_num_pos self.bbox_norm_type = bbox_norm_type self.gradient_mul = 
gradient_mul self.use_vfl = use_vfl if self.use_vfl: self.loss_cls = build_loss(loss_cls) else: self.loss_cls = build_loss(loss_cls_fl) self.loss_bbox = build_loss(loss_bbox) self.loss_bbox_refine = build_loss(loss_bbox_refine) # for getting ATSS targets self.use_atss = use_atss self.reg_decoded_bbox = reg_decoded_bbox self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) self.anchor_center_offset = anchor_generator['center_offset'] self.num_base_priors = self.prior_generator.num_base_priors[0] self.sampling = False if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) # only be used in `get_atss_targets` when `use_atss` is True self.atss_prior_generator = build_prior_generator(anchor_generator) self.fcos_prior_generator = MlvlPointGenerator( anchor_generator['strides'], self.anchor_center_offset if self.use_atss else 0.5) # In order to reuse the `get_bboxes` in `BaseDenseHead. # Only be used in testing phase. self.prior_generator = self.fcos_prior_generator @property def num_anchors(self): """ Returns: int: Number of anchors on each point of feature map. """ warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' 'please use "num_base_priors" instead') return self.num_base_priors @property def anchor_generator(self): warnings.warn('DeprecationWarning: anchor_generator is deprecated, ' 'please use "atss_prior_generator" instead') return self.prior_generator def _init_layers(self): """Initialize layers of the head.""" super(FCOSHead, self)._init_cls_convs() super(FCOSHead, self)._init_reg_convs() self.relu = nn.ReLU(inplace=True) self.vfnet_reg_conv = ConvModule( self.feat_channels, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.conv_bias) self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) self.vfnet_reg_refine_dconv = DeformConv2d( self.feat_channels, self.feat_channels, self.dcn_kernel, 1, padding=self.dcn_pad) self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1) self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides]) self.vfnet_cls_dconv = DeformConv2d( self.feat_channels, self.feat_channels, self.dcn_kernel, 1, padding=self.dcn_pad) self.vfnet_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: cls_scores (list[Tensor]): Box iou-aware scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_preds (list[Tensor]): Box offsets for each scale level, each is a 4D-tensor, the channel number is num_points * 4. bbox_preds_refine (list[Tensor]): Refined Box offsets for each scale level, each is a 4D-tensor, the channel number is num_points * 4. """ return multi_apply(self.forward_single, feats, self.scales, self.scales_refine, self.strides, self.reg_denoms) def forward_single(self, x, scale, scale_refine, stride, reg_denom): """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. scale_refine (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the refined bbox prediction. 
stride (int): The corresponding stride for feature maps, used to normalize the bbox prediction when bbox_norm_type = 'stride'. reg_denom (int): The corresponding regression range for feature maps, only used to normalize the bbox prediction when bbox_norm_type = 'reg_denom'. Returns: tuple: iou-aware cls scores for each box, bbox predictions and refined bbox predictions of input feature maps. """ cls_feat = x reg_feat = x for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) for reg_layer in self.reg_convs: reg_feat = reg_layer(reg_feat) # predict the bbox_pred of different level reg_feat_init = self.vfnet_reg_conv(reg_feat) if self.bbox_norm_type == 'reg_denom': bbox_pred = scale( self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom elif self.bbox_norm_type == 'stride': bbox_pred = scale( self.vfnet_reg(reg_feat_init)).float().exp() * stride else: raise NotImplementedError # compute star deformable convolution offsets # converting dcn_offset to reg_feat.dtype thus VFNet can be # trained with FP16 dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul, stride).to(reg_feat.dtype) # refine the bbox_pred reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset)) bbox_pred_refine = scale_refine( self.vfnet_reg_refine(reg_feat)).float().exp() bbox_pred_refine = bbox_pred_refine * bbox_pred.detach() # predict the iou-aware cls score cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset)) cls_score = self.vfnet_cls(cls_feat) if self.training: return cls_score, bbox_pred, bbox_pred_refine else: return cls_score, bbox_pred_refine def star_dcn_offset(self, bbox_pred, gradient_mul, stride): """Compute the star deformable conv offsets. Args: bbox_pred (Tensor): Predicted bbox distance offsets (l, r, t, b). gradient_mul (float): Gradient multiplier. stride (int): The corresponding stride for feature maps, used to project the bbox onto the feature map. Returns: dcn_offsets (Tensor): The offsets for deformable convolution. """ dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred) bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \ gradient_mul * bbox_pred # map to the feature map scale bbox_pred_grad_mul = bbox_pred_grad_mul / stride N, C, H, W = bbox_pred.size() x1 = bbox_pred_grad_mul[:, 0, :, :] y1 = bbox_pred_grad_mul[:, 1, :, :] x2 = bbox_pred_grad_mul[:, 2, :, :] y2 = bbox_pred_grad_mul[:, 3, :, :] bbox_pred_grad_mul_offset = bbox_pred.new_zeros( N, 2 * self.num_dconv_points, H, W) bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1 # -y1 bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1 # -x1 bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1 # -y1 bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1 # -y1 bbox_pred_grad_mul_offset[:, 5, :, :] = x2 # x2 bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1 # -x1 bbox_pred_grad_mul_offset[:, 11, :, :] = x2 # x2 bbox_pred_grad_mul_offset[:, 12, :, :] = y2 # y2 bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1 # -x1 bbox_pred_grad_mul_offset[:, 14, :, :] = y2 # y2 bbox_pred_grad_mul_offset[:, 16, :, :] = y2 # y2 bbox_pred_grad_mul_offset[:, 17, :, :] = x2 # x2 dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset return dcn_offset @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine')) def loss(self, cls_scores, bbox_preds, bbox_preds_refine, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute loss of the head. Args: cls_scores (list[Tensor]): Box iou-aware scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. 
bbox_preds (list[Tensor]): Box offsets for each scale level, each is a 4D-tensor, the channel number is num_points * 4. bbox_preds_refine (list[Tensor]): Refined Box offsets for each scale level, each is a 4D-tensor, the channel number is num_points * 4. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Default: None. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] all_level_points = self.fcos_prior_generator.grid_priors( featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device) labels, label_weights, bbox_targets, bbox_weights = self.get_targets( cls_scores, all_level_points, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore) num_imgs = cls_scores[0].size(0) # flatten cls_scores, bbox_preds and bbox_preds_refine flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels).contiguous() for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous() for bbox_pred in bbox_preds ] flatten_bbox_preds_refine = [ bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous() for bbox_pred_refine in bbox_preds_refine ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine) flatten_labels = torch.cat(labels) flatten_bbox_targets = torch.cat(bbox_targets) # repeat points to align with bbox_preds flatten_points = torch.cat( [points.repeat(num_imgs, 1) for points in all_level_points]) # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = torch.where( ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0] num_pos = len(pos_inds) pos_bbox_preds = flatten_bbox_preds[pos_inds] pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds] pos_labels = flatten_labels[pos_inds] # sync num_pos across all gpus if self.sync_num_pos: num_pos_avg_per_gpu = reduce_mean( pos_inds.new_tensor(num_pos).float()).item() num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0) else: num_pos_avg_per_gpu = num_pos pos_bbox_targets = flatten_bbox_targets[pos_inds] pos_points = flatten_points[pos_inds] pos_decoded_bbox_preds = self.bbox_coder.decode( pos_points, pos_bbox_preds) pos_decoded_target_preds = self.bbox_coder.decode( pos_points, pos_bbox_targets) iou_targets_ini = bbox_overlaps( pos_decoded_bbox_preds, pos_decoded_target_preds.detach(), is_aligned=True).clamp(min=1e-6) bbox_weights_ini = iou_targets_ini.clone().detach() bbox_avg_factor_ini = reduce_mean( bbox_weights_ini.sum()).clamp_(min=1).item() pos_decoded_bbox_preds_refine = \ self.bbox_coder.decode(pos_points, pos_bbox_preds_refine) iou_targets_rf = bbox_overlaps( pos_decoded_bbox_preds_refine, pos_decoded_target_preds.detach(), is_aligned=True).clamp(min=1e-6) bbox_weights_rf = iou_targets_rf.clone().detach() bbox_avg_factor_rf = reduce_mean( bbox_weights_rf.sum()).clamp_(min=1).item() if num_pos > 0: loss_bbox = self.loss_bbox( pos_decoded_bbox_preds, pos_decoded_target_preds.detach(), weight=bbox_weights_ini, 
avg_factor=bbox_avg_factor_ini) loss_bbox_refine = self.loss_bbox_refine( pos_decoded_bbox_preds_refine, pos_decoded_target_preds.detach(), weight=bbox_weights_rf, avg_factor=bbox_avg_factor_rf) # build IoU-aware cls_score targets if self.use_vfl: pos_ious = iou_targets_rf.clone().detach() cls_iou_targets = torch.zeros_like(flatten_cls_scores) cls_iou_targets[pos_inds, pos_labels] = pos_ious else: loss_bbox = pos_bbox_preds.sum() * 0 loss_bbox_refine = pos_bbox_preds_refine.sum() * 0 if self.use_vfl: cls_iou_targets = torch.zeros_like(flatten_cls_scores) if self.use_vfl: loss_cls = self.loss_cls( flatten_cls_scores, cls_iou_targets, avg_factor=num_pos_avg_per_gpu) else: loss_cls = self.loss_cls( flatten_cls_scores, flatten_labels, weight=label_weights, avg_factor=num_pos_avg_per_gpu) return dict( loss_cls=loss_cls, loss_bbox=loss_bbox, loss_bbox_rf=loss_bbox_refine) def get_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore): """A wrapper for computing ATSS and FCOS targets for points in multiple images. Args: cls_scores (list[Tensor]): Box iou-aware scores for each scale level with shape (N, num_points * num_classes, H, W). mlvl_points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). gt_labels (list[Tensor]): Ground truth labels of each box, each has shape (num_gt,). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). Returns: tuple: labels_list (list[Tensor]): Labels of each level. label_weights (Tensor/None): Label weights of all levels. bbox_targets_list (list[Tensor]): Regression targets of each level, (l, t, r, b). bbox_weights (Tensor/None): Bbox weights of all levels. """ if self.use_atss: return self.get_atss_targets(cls_scores, mlvl_points, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore) else: self.norm_on_bbox = False return self.get_fcos_targets(mlvl_points, gt_bboxes, gt_labels) def _get_target_single(self, *args, **kwargs): """Avoid ambiguity in multiple inheritance.""" if self.use_atss: return ATSSHead._get_target_single(self, *args, **kwargs) else: return FCOSHead._get_target_single(self, *args, **kwargs) def get_fcos_targets(self, points, gt_bboxes_list, gt_labels_list): """Compute FCOS regression and classification targets for points in multiple images. Args: points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). gt_labels_list (list[Tensor]): Ground truth labels of each box, each has shape (num_gt,). Returns: tuple: labels (list[Tensor]): Labels of each level. label_weights: None, to be compatible with ATSS targets. bbox_targets (list[Tensor]): BBox targets of each level. bbox_weights: None, to be compatible with ATSS targets. """ labels, bbox_targets = FCOSHead.get_targets(self, points, gt_bboxes_list, gt_labels_list) label_weights = None bbox_weights = None return labels, label_weights, bbox_targets, bbox_weights def get_anchors(self, featmap_sizes, img_metas, device='cuda'): """Get anchors according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. img_metas (list[dict]): Image meta info. device (torch.device | str): Device for returned tensors Returns: tuple: anchor_list (list[Tensor]): Anchors of each image. 
valid_flag_list (list[Tensor]): Valid flags of each image. """ num_imgs = len(img_metas) # since feature map sizes of all images are the same, we only compute # anchors for one time multi_level_anchors = self.atss_prior_generator.grid_priors( featmap_sizes, device=device) anchor_list = [multi_level_anchors for _ in range(num_imgs)] # for each image, we compute valid flags of multi level anchors valid_flag_list = [] for img_id, img_meta in enumerate(img_metas): multi_level_flags = self.atss_prior_generator.valid_flags( featmap_sizes, img_meta['pad_shape'], device=device) valid_flag_list.append(multi_level_flags) return anchor_list, valid_flag_list def get_atss_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """A wrapper for computing ATSS targets for points in multiple images. Args: cls_scores (list[Tensor]): Box iou-aware scores for each scale level with shape (N, num_points * num_classes, H, W). mlvl_points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). gt_labels (list[Tensor]): Ground truth labels of each box, each has shape (num_gt,). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). Default: None. Returns: tuple: labels_list (list[Tensor]): Labels of each level. label_weights (Tensor): Label weights of all levels. bbox_targets_list (list[Tensor]): Regression targets of each level, (l, t, r, b). bbox_weights (Tensor): Bbox weights of all levels. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len( featmap_sizes ) == self.atss_prior_generator.num_levels == \ self.fcos_prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = ATSSHead.get_targets( self, anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, unmap_outputs=True) if cls_reg_targets is None: return None (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets bbox_targets_list = [ bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list ] num_imgs = len(img_metas) # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format bbox_targets_list = self.transform_bbox_targets( bbox_targets_list, mlvl_points, num_imgs) labels_list = [labels.reshape(-1) for labels in labels_list] label_weights_list = [ label_weights.reshape(-1) for label_weights in label_weights_list ] bbox_weights_list = [ bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list ] label_weights = torch.cat(label_weights_list) bbox_weights = torch.cat(bbox_weights_list) return labels_list, label_weights, bbox_targets_list, bbox_weights def transform_bbox_targets(self, decoded_bboxes, mlvl_points, num_imgs): """Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format. Args: decoded_bboxes (list[Tensor]): Regression targets of each level, in the form of (x1, y1, x2, y2). mlvl_points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). num_imgs (int): the number of images in a batch. 
        Returns:
            bbox_targets (list[Tensor]): Regression targets of each level in
                the form of (l, t, r, b).
        """
        # TODO: Re-implement in Class PointCoder
        assert len(decoded_bboxes) == len(mlvl_points)
        num_levels = len(decoded_bboxes)
        mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points]
        bbox_targets = []
        for i in range(num_levels):
            bbox_target = self.bbox_coder.encode(mlvl_points[i],
                                                 decoded_bboxes[i])
            bbox_targets.append(bbox_target)

        return bbox_targets

    def _load_from_state_dict(self, state_dict, prefix, local_metadata,
                              strict, missing_keys, unexpected_keys,
                              error_msgs):
        """Override the method in the parent class to avoid changing the
        parameters' names."""
        pass

    def _get_points_single(self,
                           featmap_size,
                           stride,
                           dtype,
                           device,
                           flatten=False):
        """Get points according to feature map size.

        This function will be deprecated soon.
        """
        warnings.warn(
            '`_get_points_single` in `VFNetHead` will be deprecated soon; '
            'we support a multi-level point generator now. You can get the '
            'points of a single level feature map with '
            '`self.fcos_prior_generator.single_level_grid_priors`.')

        h, w = featmap_size
        x_range = torch.arange(
            0, w * stride, stride, dtype=dtype, device=device)
        y_range = torch.arange(
            0, h * stride, stride, dtype=dtype, device=device)
        y, x = torch.meshgrid(y_range, x_range)
        # to be compatible with anchor points in ATSS
        if self.use_atss:
            points = torch.stack(
                (x.reshape(-1), y.reshape(-1)), dim=-1) + \
                stride * self.anchor_center_offset
        else:
            points = torch.stack(
                (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
        return points



================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/yolact_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, ModuleList, force_fp32

from mmdet.core import build_sampler, fast_nms, images_to_levels, multi_apply
from mmdet.core.utils import select_single_mlvl
from ..builder import HEADS, build_loss
from .anchor_head import AnchorHead


@HEADS.register_module()
class YOLACTHead(AnchorHead):
    """YOLACT box head used in https://arxiv.org/abs/1904.02689.

    Note that YOLACT head is a light version of RetinaNet head.
    Four differences are described as follows:

    1. YOLACT box head has three-times fewer anchors.
    2. YOLACT box head shares the convs for box and cls branches.
    3. YOLACT box head uses OHEM instead of Focal loss.
    4. YOLACT box head predicts a set of mask coefficients for each box.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        anchor_generator (dict): Config dict for anchor generator
        loss_cls (dict): Config of classification loss.
        loss_bbox (dict): Config of localization loss.
        num_head_convs (int): Number of the conv layers shared by
            box and cls branches.
        num_protos (int): Number of the mask coefficients.
        use_ohem (bool): If true, ``loss_single_OHEM`` will be used for
            cls loss calculation. If false, ``loss_single`` will be used.
        conv_cfg (dict): Dictionary to construct and config conv layer.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        init_cfg (dict or list[dict], optional): Initialization config dict.
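
    Example:
        A minimal smoke test (an illustrative sketch; the constructor
        arguments below are assumed values, not taken from any config in
        this repo):

        >>> import torch
        >>> self = YOLACTHead(11, 7, feat_channels=32)
        >>> x = torch.rand(1, 7, 16, 16)
        >>> cls_score, bbox_pred, coeff_pred = self.forward_single(x)
        >>> cls_score.shape[1] == self.num_base_priors * self.cls_out_channels
        True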
""" def __init__(self, num_classes, in_channels, anchor_generator=dict( type='AnchorGenerator', octave_base_scale=3, scales_per_octave=1, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, reduction='none', loss_weight=1.0), loss_bbox=dict( type='SmoothL1Loss', beta=1.0, loss_weight=1.5), num_head_convs=1, num_protos=32, use_ohem=True, conv_cfg=None, norm_cfg=None, init_cfg=dict( type='Xavier', distribution='uniform', bias=0, layer='Conv2d'), **kwargs): self.num_head_convs = num_head_convs self.num_protos = num_protos self.use_ohem = use_ohem self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg super(YOLACTHead, self).__init__( num_classes, in_channels, loss_cls=loss_cls, loss_bbox=loss_bbox, anchor_generator=anchor_generator, init_cfg=init_cfg, **kwargs) if self.use_ohem: sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.sampling = False def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.head_convs = ModuleList() for i in range(self.num_head_convs): chn = self.in_channels if i == 0 else self.feat_channels self.head_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.conv_cls = nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.conv_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) self.conv_coeff = nn.Conv2d( self.feat_channels, self.num_base_priors * self.num_protos, 3, padding=1) def forward_single(self, x): """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. Returns: tuple: cls_score (Tensor): Cls scores for a single scale level \ the channels number is num_anchors * num_classes. bbox_pred (Tensor): Box energies / deltas for a single scale \ level, the channels number is num_anchors * 4. coeff_pred (Tensor): Mask coefficients for a single scale \ level, the channels number is num_anchors * num_protos. """ for head_conv in self.head_convs: x = head_conv(x) cls_score = self.conv_cls(x) bbox_pred = self.conv_reg(x) coeff_pred = self.conv_coeff(x).tanh() return cls_score, bbox_pred, coeff_pred @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """A combination of the func:``AnchorHead.loss`` and func:``SSDHead.loss``. When ``self.use_ohem == True``, it functions like ``SSDHead.loss``, otherwise, it follows ``AnchorHead.loss``. Besides, it additionally returns ``sampling_results``. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): Class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): Specify which bounding boxes can be ignored when computing the loss. Default: None Returns: tuple: dict[str, Tensor]: A dictionary of loss components. List[:obj:``SamplingResult``]: Sampler results for each image. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, unmap_outputs=not self.use_ohem, return_sampling_results=True) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg, sampling_results) = cls_reg_targets if self.use_ohem: num_images = len(img_metas) all_cls_scores = torch.cat([ s.permute(0, 2, 3, 1).reshape( num_images, -1, self.cls_out_channels) for s in cls_scores ], 1) all_labels = torch.cat(labels_list, -1).view(num_images, -1) all_label_weights = torch.cat(label_weights_list, -1).view(num_images, -1) all_bbox_preds = torch.cat([ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) for b in bbox_preds ], -2) all_bbox_targets = torch.cat(bbox_targets_list, -2).view(num_images, -1, 4) all_bbox_weights = torch.cat(bbox_weights_list, -2).view(num_images, -1, 4) # concat all level anchors to a single tensor all_anchors = [] for i in range(num_images): all_anchors.append(torch.cat(anchor_list[i])) # check NaN and Inf assert torch.isfinite(all_cls_scores).all().item(), \ 'classification scores become infinite or NaN!' assert torch.isfinite(all_bbox_preds).all().item(), \ 'bbox predications become infinite or NaN!' losses_cls, losses_bbox = multi_apply( self.loss_single_OHEM, all_cls_scores, all_bbox_preds, all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, num_total_samples=num_total_pos) else: num_total_samples = ( num_total_pos + num_total_neg if self.sampling else num_total_pos) # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor concat_anchor_list = [] for i in range(len(anchor_list)): concat_anchor_list.append(torch.cat(anchor_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) losses_cls, losses_bbox = multi_apply( self.loss_single, cls_scores, bbox_preds, all_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_samples=num_total_samples) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox), sampling_results def loss_single_OHEM(self, cls_score, bbox_pred, anchors, labels, label_weights, bbox_targets, bbox_weights, num_total_samples): """"See func:``SSDHead.loss``.""" loss_cls_all = self.loss_cls(cls_score, labels, label_weights) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero( as_tuple=False).reshape(-1) neg_inds = (labels == self.num_classes).nonzero( as_tuple=False).view(-1) num_pos_samples = pos_inds.size(0) if num_pos_samples == 0: num_neg_samples = neg_inds.size(0) else: num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples if num_neg_samples > neg_inds.size(0): num_neg_samples = neg_inds.size(0) topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples) loss_cls_pos = loss_cls_all[pos_inds].sum() loss_cls_neg = topk_loss_cls_neg.sum() loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples if self.reg_decoded_bbox: # When the regression loss (e.g. 
`IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, it # decodes the already encoded coordinates to absolute format. bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) loss_bbox = self.loss_bbox( bbox_pred, bbox_targets, bbox_weights, avg_factor=num_total_samples) return loss_cls[None], loss_bbox @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'coeff_preds')) def get_bboxes(self, cls_scores, bbox_preds, coeff_preds, img_metas, cfg=None, rescale=False): """"Similar to func:``AnchorHead.get_bboxes``, but additionally processes coeff_preds. Args: cls_scores (list[Tensor]): Box scores for each scale level with shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) coeff_preds (list[Tensor]): Mask coefficients for each scale level with shape (N, num_anchors * num_protos, H, W) img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. cfg (mmcv.Config | None): Test / postprocessing configuration, if None, test_cfg would be used rescale (bool): If True, return boxes in original image space. Default: False. Returns: list[tuple[Tensor, Tensor, Tensor]]: Each item in result_list is a 3-tuple. The first item is an (n, 5) tensor, where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. The second item is an (n,) tensor where each item is the predicted class label of the corresponding box. The third item is an (n, num_protos) tensor where each item is the predicted mask coefficients of instance inside the corresponding box. """ assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) device = cls_scores[0].device featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] mlvl_anchors = self.prior_generator.grid_priors( featmap_sizes, device=device) det_bboxes = [] det_labels = [] det_coeffs = [] for img_id in range(len(img_metas)): cls_score_list = select_single_mlvl(cls_scores, img_id) bbox_pred_list = select_single_mlvl(bbox_preds, img_id) coeff_pred_list = select_single_mlvl(coeff_preds, img_id) img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] bbox_res = self._get_bboxes_single(cls_score_list, bbox_pred_list, coeff_pred_list, mlvl_anchors, img_shape, scale_factor, cfg, rescale) det_bboxes.append(bbox_res[0]) det_labels.append(bbox_res[1]) det_coeffs.append(bbox_res[2]) return det_bboxes, det_labels, det_coeffs def _get_bboxes_single(self, cls_score_list, bbox_pred_list, coeff_preds_list, mlvl_anchors, img_shape, scale_factor, cfg, rescale=False): """"Similar to func:``AnchorHead._get_bboxes_single``, but additionally processes coeff_preds_list and uses fast NMS instead of traditional NMS. Args: cls_score_list (list[Tensor]): Box scores for a single scale level Has shape (num_anchors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas for a single scale level with shape (num_anchors * 4, H, W). coeff_preds_list (list[Tensor]): Mask coefficients for a single scale level with shape (num_anchors * num_protos, H, W). mlvl_anchors (list[Tensor]): Box reference for a single scale level with shape (num_total_anchors, 4). img_shape (tuple[int]): Shape of the input image, (height, width, 3). scale_factor (ndarray): Scale factor of the image arange as (w_scale, h_scale, w_scale, h_scale). cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. 
rescale (bool): If True, return boxes in original image space. Returns: tuple[Tensor, Tensor, Tensor]: The first item is an (n, 5) tensor, where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. The second item is an (n,) tensor where each item is the predicted class label of the corresponding box. The third item is an (n, num_protos) tensor where each item is the predicted mask coefficients of instance inside the corresponding box. """ cfg = self.test_cfg if cfg is None else cfg assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors) nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_coeffs = [] for cls_score, bbox_pred, coeff_pred, anchors in \ zip(cls_score_list, bbox_pred_list, coeff_preds_list, mlvl_anchors): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1) bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) coeff_pred = coeff_pred.permute(1, 2, 0).reshape(-1, self.num_protos) if 0 < nms_pre < scores.shape[0]: # Get maximum scores for foreground classes. if self.use_sigmoid_cls: max_scores, _ = scores.max(dim=1) else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class max_scores, _ = scores[:, :-1].max(dim=1) _, topk_inds = max_scores.topk(nms_pre) anchors = anchors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] coeff_pred = coeff_pred[topk_inds, :] bboxes = self.bbox_coder.decode( anchors, bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_coeffs.append(coeff_pred) mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) mlvl_coeffs = torch.cat(mlvl_coeffs) if self.use_sigmoid_cls: # Add a dummy background class to the backend when using sigmoid # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) det_bboxes, det_labels, det_coeffs = fast_nms(mlvl_bboxes, mlvl_scores, mlvl_coeffs, cfg.score_thr, cfg.iou_thr, cfg.top_k, cfg.max_per_img) return det_bboxes, det_labels, det_coeffs @HEADS.register_module() class YOLACTSegmHead(BaseModule): """YOLACT segmentation head used in https://arxiv.org/abs/1904.02689. Apply a semantic segmentation loss on feature space using layers that are only evaluated during training to increase performance with no speed penalty. Args: in_channels (int): Number of channels in the input feature map. num_classes (int): Number of categories excluding the background category. loss_segm (dict): Config of semantic segmentation loss. init_cfg (dict or list[dict], optional): Initialization config dict. 
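
    Example (editor's addition; an illustrative sketch only, assuming the
    default 1x1 conv layer):
        >>> import torch
        >>> self = YOLACTSegmHead(num_classes=80, in_channels=256)
        >>> feat = torch.rand(1, 256, 69, 69)
        >>> segm_pred = self.forward(feat)
        >>> assert segm_pred.shape == (1, 80, 69, 69)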
""" def __init__(self, num_classes, in_channels=256, loss_segm=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), init_cfg=dict( type='Xavier', distribution='uniform', override=dict(name='segm_conv'))): super(YOLACTSegmHead, self).__init__(init_cfg) self.in_channels = in_channels self.num_classes = num_classes self.loss_segm = build_loss(loss_segm) self._init_layers() self.fp16_enabled = False def _init_layers(self): """Initialize layers of the head.""" self.segm_conv = nn.Conv2d( self.in_channels, self.num_classes, kernel_size=1) def forward(self, x): """Forward feature from the upstream network. Args: x (Tensor): Feature from the upstream network, which is a 4D-tensor. Returns: Tensor: Predicted semantic segmentation map with shape (N, num_classes, H, W). """ return self.segm_conv(x) @force_fp32(apply_to=('segm_pred', )) def loss(self, segm_pred, gt_masks, gt_labels): """Compute loss of the head. Args: segm_pred (list[Tensor]): Predicted semantic segmentation map with shape (N, num_classes, H, W). gt_masks (list[Tensor]): Ground truth masks for each image with the same shape of the input image. gt_labels (list[Tensor]): Class indices corresponding to each box. Returns: dict[str, Tensor]: A dictionary of loss components. """ loss_segm = [] num_imgs, num_classes, mask_h, mask_w = segm_pred.size() for idx in range(num_imgs): cur_segm_pred = segm_pred[idx] cur_gt_masks = gt_masks[idx].float() cur_gt_labels = gt_labels[idx] segm_targets = self.get_targets(cur_segm_pred, cur_gt_masks, cur_gt_labels) if segm_targets is None: loss = self.loss_segm(cur_segm_pred, torch.zeros_like(cur_segm_pred), torch.zeros_like(cur_segm_pred)) else: loss = self.loss_segm( cur_segm_pred, segm_targets, avg_factor=num_imgs * mask_h * mask_w) loss_segm.append(loss) return dict(loss_segm=loss_segm) def get_targets(self, segm_pred, gt_masks, gt_labels): """Compute semantic segmentation targets for each image. Args: segm_pred (Tensor): Predicted semantic segmentation map with shape (num_classes, H, W). gt_masks (Tensor): Ground truth masks for each image with the same shape of the input image. gt_labels (Tensor): Class indices corresponding to each box. Returns: Tensor: Semantic segmentation targets with shape (num_classes, H, W). """ if gt_masks.size(0) == 0: return None num_classes, mask_h, mask_w = segm_pred.size() with torch.no_grad(): downsampled_masks = F.interpolate( gt_masks.unsqueeze(0), (mask_h, mask_w), mode='bilinear', align_corners=False).squeeze(0) downsampled_masks = downsampled_masks.gt(0.5).float() segm_targets = torch.zeros_like(segm_pred, requires_grad=False) for obj_idx in range(downsampled_masks.size(0)): segm_targets[gt_labels[obj_idx] - 1] = torch.max( segm_targets[gt_labels[obj_idx] - 1], downsampled_masks[obj_idx]) return segm_targets def simple_test(self, feats, img_metas, rescale=False): """Test function without test-time augmentation.""" raise NotImplementedError( 'simple_test of YOLACTSegmHead is not implemented ' 'because this head is only evaluated during training') @HEADS.register_module() class YOLACTProtonet(BaseModule): """YOLACT mask head used in https://arxiv.org/abs/1904.02689. This head outputs the mask prototypes for YOLACT. Args: in_channels (int): Number of channels in the input feature map. proto_channels (tuple[int]): Output channels of protonet convs. proto_kernel_sizes (tuple[int]): Kernel sizes of protonet convs. include_last_relu (Bool): If keep the last relu of protonet. num_protos (int): Number of prototypes. 
num_classes (int): Number of categories excluding the background category. loss_mask_weight (float): Reweight the mask loss by this factor. max_masks_to_train (int): Maximum number of masks to train for each image. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, num_classes, in_channels=256, proto_channels=(256, 256, 256, None, 256, 32), proto_kernel_sizes=(3, 3, 3, -2, 3, 1), include_last_relu=True, num_protos=32, loss_mask_weight=1.0, max_masks_to_train=100, init_cfg=dict( type='Xavier', distribution='uniform', override=dict(name='protonet'))): super(YOLACTProtonet, self).__init__(init_cfg) self.in_channels = in_channels self.proto_channels = proto_channels self.proto_kernel_sizes = proto_kernel_sizes self.include_last_relu = include_last_relu self.protonet = self._init_layers() self.loss_mask_weight = loss_mask_weight self.num_protos = num_protos self.num_classes = num_classes self.max_masks_to_train = max_masks_to_train self.fp16_enabled = False def _init_layers(self): """A helper function to take a config setting and turn it into a network.""" # Possible patterns: # ( 256, 3) -> conv # ( 256,-2) -> deconv # (None,-2) -> bilinear interpolate in_channels = self.in_channels protonets = ModuleList() for num_channels, kernel_size in zip(self.proto_channels, self.proto_kernel_sizes): if kernel_size > 0: layer = nn.Conv2d( in_channels, num_channels, kernel_size, padding=kernel_size // 2) else: if num_channels is None: layer = InterpolateModule( scale_factor=-kernel_size, mode='bilinear', align_corners=False) else: layer = nn.ConvTranspose2d( in_channels, num_channels, -kernel_size, padding=kernel_size // 2) protonets.append(layer) protonets.append(nn.ReLU(inplace=True)) in_channels = num_channels if num_channels is not None \ else in_channels if not self.include_last_relu: protonets = protonets[:-1] return nn.Sequential(*protonets) def forward_dummy(self, x): prototypes = self.protonet(x) return prototypes def forward(self, x, coeff_pred, bboxes, img_meta, sampling_results=None): """Forward feature from the upstream network to get prototypes and linearly combine the prototypes, using masks coefficients, into instance masks. Finally, crop the instance masks with given bboxes. Args: x (Tensor): Feature from the upstream network, which is a 4D-tensor. coeff_pred (list[Tensor]): Mask coefficients for each scale level with shape (N, num_anchors * num_protos, H, W). bboxes (list[Tensor]): Box used for cropping with shape (N, num_anchors * 4, H, W). During training, they are ground truth boxes. During testing, they are predicted boxes. img_meta (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. sampling_results (List[:obj:``SamplingResult``]): Sampler results for each image. Returns: list[Tensor]: Predicted instance segmentation masks. """ prototypes = self.protonet(x) prototypes = prototypes.permute(0, 2, 3, 1).contiguous() num_imgs = x.size(0) # The reason for not using self.training is that # val workflow will have a dimension mismatch error. # Note that this writing method is very tricky. 
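        # Editor's note (added): the dim()-based check below separates the
        # batched train/val path (multi-level 4D coefficient maps) from the
        # test path (already flattened per-image coefficients). The core
        # YOLACT step further down is then a plain linear combination of the
        # K prototype maps with each instance's K coefficients, e.g. for
        # prototypes P of shape (H, W, K) and coefficients c of shape (N, K):
        #
        #     masks = torch.sigmoid(P @ c.t())   # (H, W, N)
        #
        # followed by cropping each mask to its (normalized) box.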
# Fix https://github.com/open-mmlab/mmdetection/issues/5978 is_train_or_val_workflow = (coeff_pred[0].dim() == 4) # Train or val workflow if is_train_or_val_workflow: coeff_pred_list = [] for coeff_pred_per_level in coeff_pred: coeff_pred_per_level = \ coeff_pred_per_level.permute( 0, 2, 3, 1).reshape(num_imgs, -1, self.num_protos) coeff_pred_list.append(coeff_pred_per_level) coeff_pred = torch.cat(coeff_pred_list, dim=1) mask_pred_list = [] for idx in range(num_imgs): cur_prototypes = prototypes[idx] cur_coeff_pred = coeff_pred[idx] cur_bboxes = bboxes[idx] cur_img_meta = img_meta[idx] # Testing state if not is_train_or_val_workflow: bboxes_for_cropping = cur_bboxes else: cur_sampling_results = sampling_results[idx] pos_assigned_gt_inds = \ cur_sampling_results.pos_assigned_gt_inds bboxes_for_cropping = cur_bboxes[pos_assigned_gt_inds].clone() pos_inds = cur_sampling_results.pos_inds cur_coeff_pred = cur_coeff_pred[pos_inds] # Linearly combine the prototypes with the mask coefficients mask_pred = cur_prototypes @ cur_coeff_pred.t() mask_pred = torch.sigmoid(mask_pred) h, w = cur_img_meta['img_shape'][:2] bboxes_for_cropping[:, 0] /= w bboxes_for_cropping[:, 1] /= h bboxes_for_cropping[:, 2] /= w bboxes_for_cropping[:, 3] /= h mask_pred = self.crop(mask_pred, bboxes_for_cropping) mask_pred = mask_pred.permute(2, 0, 1).contiguous() mask_pred_list.append(mask_pred) return mask_pred_list @force_fp32(apply_to=('mask_pred', )) def loss(self, mask_pred, gt_masks, gt_bboxes, img_meta, sampling_results): """Compute loss of the head. Args: mask_pred (list[Tensor]): Predicted prototypes with shape (num_classes, H, W). gt_masks (list[Tensor]): Ground truth masks for each image with the same shape of the input image. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. img_meta (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. sampling_results (List[:obj:``SamplingResult``]): Sampler results for each image. Returns: dict[str, Tensor]: A dictionary of loss components. """ loss_mask = [] num_imgs = len(mask_pred) total_pos = 0 for idx in range(num_imgs): cur_mask_pred = mask_pred[idx] cur_gt_masks = gt_masks[idx].float() cur_gt_bboxes = gt_bboxes[idx] cur_img_meta = img_meta[idx] cur_sampling_results = sampling_results[idx] pos_assigned_gt_inds = cur_sampling_results.pos_assigned_gt_inds num_pos = pos_assigned_gt_inds.size(0) # Since we're producing (near) full image masks, # it'd take too much vram to backprop on every single mask. # Thus we select only a subset. if num_pos > self.max_masks_to_train: perm = torch.randperm(num_pos) select = perm[:self.max_masks_to_train] cur_mask_pred = cur_mask_pred[select] pos_assigned_gt_inds = pos_assigned_gt_inds[select] num_pos = self.max_masks_to_train total_pos += num_pos gt_bboxes_for_reweight = cur_gt_bboxes[pos_assigned_gt_inds] mask_targets = self.get_targets(cur_mask_pred, cur_gt_masks, pos_assigned_gt_inds) if num_pos == 0: loss = cur_mask_pred.sum() * 0. 
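            # Editor's note (added): the branches below compute a
            # per-instance BCE that is reweighted by the inverse of the
            # normalized GT box area, roughly
            #
            #     loss_i = mean(BCE_i) / ((w_i / W) * (h_i / H))
            #
            # with (w_i, h_i) the GT box size and (W, H) the image size, so
            # small instances are not drowned out by large ones.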
elif mask_targets is None: loss = F.binary_cross_entropy(cur_mask_pred, torch.zeros_like(cur_mask_pred), torch.zeros_like(cur_mask_pred)) else: cur_mask_pred = torch.clamp(cur_mask_pred, 0, 1) loss = F.binary_cross_entropy( cur_mask_pred, mask_targets, reduction='none') * self.loss_mask_weight h, w = cur_img_meta['img_shape'][:2] gt_bboxes_width = (gt_bboxes_for_reweight[:, 2] - gt_bboxes_for_reweight[:, 0]) / w gt_bboxes_height = (gt_bboxes_for_reweight[:, 3] - gt_bboxes_for_reweight[:, 1]) / h loss = loss.mean(dim=(1, 2)) / gt_bboxes_width / gt_bboxes_height loss = torch.sum(loss) loss_mask.append(loss) if total_pos == 0: total_pos += 1 # avoid nan loss_mask = [x / total_pos for x in loss_mask] return dict(loss_mask=loss_mask) def get_targets(self, mask_pred, gt_masks, pos_assigned_gt_inds): """Compute instance segmentation targets for each image. Args: mask_pred (Tensor): Predicted prototypes with shape (num_classes, H, W). gt_masks (Tensor): Ground truth masks for each image with the same shape of the input image. pos_assigned_gt_inds (Tensor): GT indices of the corresponding positive samples. Returns: Tensor: Instance segmentation targets with shape (num_instances, H, W). """ if gt_masks.size(0) == 0: return None mask_h, mask_w = mask_pred.shape[-2:] gt_masks = F.interpolate( gt_masks.unsqueeze(0), (mask_h, mask_w), mode='bilinear', align_corners=False).squeeze(0) gt_masks = gt_masks.gt(0.5).float() mask_targets = gt_masks[pos_assigned_gt_inds] return mask_targets def get_seg_masks(self, mask_pred, label_pred, img_meta, rescale): """Resize, binarize, and format the instance mask predictions. Args: mask_pred (Tensor): shape (N, H, W). label_pred (Tensor): shape (N, ). img_meta (dict): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If rescale is False, then returned masks will fit the scale of imgs[0]. Returns: list[ndarray]: Mask predictions grouped by their predicted classes. """ ori_shape = img_meta['ori_shape'] scale_factor = img_meta['scale_factor'] if rescale: img_h, img_w = ori_shape[:2] else: img_h = np.round(ori_shape[0] * scale_factor[1]).astype(np.int32) img_w = np.round(ori_shape[1] * scale_factor[0]).astype(np.int32) cls_segms = [[] for _ in range(self.num_classes)] if mask_pred.size(0) == 0: return cls_segms mask_pred = F.interpolate( mask_pred.unsqueeze(0), (img_h, img_w), mode='bilinear', align_corners=False).squeeze(0) > 0.5 mask_pred = mask_pred.cpu().numpy().astype(np.uint8) for m, l in zip(mask_pred, label_pred): cls_segms[l].append(m) return cls_segms def crop(self, masks, boxes, padding=1): """Crop predicted masks by zeroing out everything not in the predicted bbox. Args: masks (Tensor): shape [H, W, N]. boxes (Tensor): bbox coords in relative point form with shape [N, 4]. Return: Tensor: The cropped masks. 
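
        Example (editor's addition, illustrative only):
            >>> import torch
            >>> masks = torch.rand(32, 32, 2)  # (H, W, N)
            >>> boxes = torch.tensor([[0.00, 0.00, 0.50, 0.50],
            ...                       [0.25, 0.25, 1.00, 1.00]])
            >>> cropped = self.crop(masks, boxes)  # zeroed outside each box
            >>> cropped.shape
            torch.Size([32, 32, 2])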
""" h, w, n = masks.size() x1, x2 = self.sanitize_coordinates( boxes[:, 0], boxes[:, 2], w, padding, cast=False) y1, y2 = self.sanitize_coordinates( boxes[:, 1], boxes[:, 3], h, padding, cast=False) rows = torch.arange( w, device=masks.device, dtype=x1.dtype).view(1, -1, 1).expand(h, w, n) cols = torch.arange( h, device=masks.device, dtype=x1.dtype).view(-1, 1, 1).expand(h, w, n) masks_left = rows >= x1.view(1, 1, -1) masks_right = rows < x2.view(1, 1, -1) masks_up = cols >= y1.view(1, 1, -1) masks_down = cols < y2.view(1, 1, -1) crop_mask = masks_left * masks_right * masks_up * masks_down return masks * crop_mask.float() def sanitize_coordinates(self, x1, x2, img_size, padding=0, cast=True): """Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0, and x2 <= image_size. Also converts from relative to absolute coordinates and casts the results to long tensors. Warning: this does things in-place behind the scenes so copy if necessary. Args: _x1 (Tensor): shape (N, ). _x2 (Tensor): shape (N, ). img_size (int): Size of the input image. padding (int): x1 >= padding, x2 <= image_size-padding. cast (bool): If cast is false, the result won't be cast to longs. Returns: tuple: x1 (Tensor): Sanitized _x1. x2 (Tensor): Sanitized _x2. """ x1 = x1 * img_size x2 = x2 * img_size if cast: x1 = x1.long() x2 = x2.long() x1 = torch.min(x1, x2) x2 = torch.max(x1, x2) x1 = torch.clamp(x1 - padding, min=0) x2 = torch.clamp(x2 + padding, max=img_size) return x1, x2 def simple_test(self, feats, det_bboxes, det_labels, det_coeffs, img_metas, rescale=False): """Test function without test-time augmentation. Args: feats (tuple[torch.Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. det_bboxes (list[Tensor]): BBox results of each image. each element is (n, 5) tensor, where 5 represent (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. det_labels (list[Tensor]): BBox results of each image. each element is (n, ) tensor, each element represents the class label of the corresponding box. det_coeffs (list[Tensor]): BBox coefficient of each image. each element is (n, m) tensor, m is vector length. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[list]: encoded masks. The c-th item in the outer list corresponds to the c-th class. Given the c-th outer list, the i-th item in that inner list is the mask for the i-th box with class label c. """ num_imgs = len(img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): segm_results = [[[] for _ in range(self.num_classes)] for _ in range(num_imgs)] else: # if det_bboxes is rescaled to the original image size, we need to # rescale it back to the testing scale to obtain RoIs. 
if rescale and not isinstance(scale_factors[0], float): scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i][:, :4] for i in range(len(det_bboxes)) ] mask_preds = self.forward(feats[0], det_coeffs, _bboxes, img_metas) # apply mask post-processing to each image individually segm_results = [] for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: segm_results.append([[] for _ in range(self.num_classes)]) else: segm_result = self.get_seg_masks(mask_preds[i], det_labels[i], img_metas[i], rescale) segm_results.append(segm_result) return segm_results class InterpolateModule(BaseModule): """This is a module version of F.interpolate. Any arguments you give it just get passed along for the ride. """ def __init__(self, *args, init_cfg=None, **kwargs): super().__init__(init_cfg) self.args = args self.kwargs = kwargs def forward(self, x): """Forward features from the upstream network.""" return F.interpolate(x, *self.args, **self.kwargs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/yolo_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. # Copyright (c) 2019 Western Digital Corporation or its affiliates. import warnings import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import (ConvModule, bias_init_with_prob, constant_init, is_norm, normal_init) from mmcv.runner import force_fp32 from mmdet.core import (build_assigner, build_bbox_coder, build_prior_generator, build_sampler, images_to_levels, multi_apply, multiclass_nms) from ..builder import HEADS, build_loss from .base_dense_head import BaseDenseHead from .dense_test_mixins import BBoxTestMixin @HEADS.register_module() class YOLOV3Head(BaseDenseHead, BBoxTestMixin): """YOLOV3Head Paper link: https://arxiv.org/abs/1804.02767. Args: num_classes (int): The number of object classes (w/o background) in_channels (List[int]): Number of input channels per scale. out_channels (List[int]): The number of output channels per scale before the final 1x1 layer. Default: (1024, 512, 256). anchor_generator (dict): Config dict for anchor generator bbox_coder (dict): Config of bounding box coder. featmap_strides (List[int]): The stride of each scale. Should be in descending order. Default: (32, 16, 8). one_hot_smoother (float): Set a non-zero value to enable label-smooth Default: 0. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True) act_cfg (dict): Config dict for activation layer. Default: dict(type='LeakyReLU', negative_slope=0.1). loss_cls (dict): Config of classification loss. loss_conf (dict): Config of confidence loss. loss_xy (dict): Config of xy coordinate loss. loss_wh (dict): Config of wh coordinate loss. train_cfg (dict): Training config of YOLOV3 head. Default: None. test_cfg (dict): Testing config of YOLOV3 head. Default: None. init_cfg (dict or list[dict], optional): Initialization config dict. 
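
    Example (editor's sketch, assuming the default anchor generator and
    strides; illustrative only):
        >>> import torch
        >>> self = YOLOV3Head(num_classes=80, in_channels=[512, 256, 128])
        >>> feats = [torch.rand(1, c, s, s)
        ...          for c, s in zip([512, 256, 128], [13, 26, 52])]
        >>> pred_maps, = self.forward(feats)
        >>> # one map per level, 3 anchors * (5 + 80) attributes each
        >>> [p.shape[1] for p in pred_maps]
        [255, 255, 255]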
""" def __init__(self, num_classes, in_channels, out_channels=(1024, 512, 256), anchor_generator=dict( type='YOLOAnchorGenerator', base_sizes=[[(116, 90), (156, 198), (373, 326)], [(30, 61), (62, 45), (59, 119)], [(10, 13), (16, 30), (33, 23)]], strides=[32, 16, 8]), bbox_coder=dict(type='YOLOBBoxCoder'), featmap_strides=[32, 16, 8], one_hot_smoother=0., conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_conf=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_xy=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_wh=dict(type='MSELoss', loss_weight=1.0), train_cfg=None, test_cfg=None, init_cfg=dict( type='Normal', std=0.01, override=dict(name='convs_pred'))): super(YOLOV3Head, self).__init__(init_cfg) # Check params assert (len(in_channels) == len(out_channels) == len(featmap_strides)) self.num_classes = num_classes self.in_channels = in_channels self.out_channels = out_channels self.featmap_strides = featmap_strides self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) if hasattr(self.train_cfg, 'sampler'): sampler_cfg = self.train_cfg.sampler else: sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.fp16_enabled = False self.one_hot_smoother = one_hot_smoother self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.bbox_coder = build_bbox_coder(bbox_coder) self.prior_generator = build_prior_generator(anchor_generator) self.loss_cls = build_loss(loss_cls) self.loss_conf = build_loss(loss_conf) self.loss_xy = build_loss(loss_xy) self.loss_wh = build_loss(loss_wh) self.num_base_priors = self.prior_generator.num_base_priors[0] assert len( self.prior_generator.num_base_priors) == len(featmap_strides) self._init_layers() @property def anchor_generator(self): warnings.warn('DeprecationWarning: `anchor_generator` is deprecated, ' 'please use "prior_generator" instead') return self.prior_generator @property def num_anchors(self): """ Returns: int: Number of anchors on each point of feature map. 
""" warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' 'please use "num_base_priors" instead') return self.num_base_priors @property def num_levels(self): return len(self.featmap_strides) @property def num_attrib(self): """int: number of attributes in pred_map, bboxes (4) + objectness (1) + num_classes""" return 5 + self.num_classes def _init_layers(self): self.convs_bridge = nn.ModuleList() self.convs_pred = nn.ModuleList() for i in range(self.num_levels): conv_bridge = ConvModule( self.in_channels[i], self.out_channels[i], 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg) conv_pred = nn.Conv2d(self.out_channels[i], self.num_base_priors * self.num_attrib, 1) self.convs_bridge.append(conv_bridge) self.convs_pred.append(conv_pred) def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): normal_init(m, mean=0, std=0.01) if is_norm(m): constant_init(m, 1) # Use prior in model initialization to improve stability for conv_pred, stride in zip(self.convs_pred, self.featmap_strides): bias = conv_pred.bias.reshape(self.num_base_priors, -1) # init objectness with prior of 8 objects per feature map # refer to https://github.com/ultralytics/yolov3 nn.init.constant_(bias.data[:, 4], bias_init_with_prob(8 / (608 / stride)**2)) nn.init.constant_(bias.data[:, 5:], bias_init_with_prob(0.01)) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple[Tensor]: A tuple of multi-level predication map, each is a 4D-tensor of shape (batch_size, 5+num_classes, height, width). """ assert len(feats) == self.num_levels pred_maps = [] for i in range(self.num_levels): x = feats[i] x = self.convs_bridge[i](x) pred_map = self.convs_pred[i](x) pred_maps.append(pred_map) return tuple(pred_maps), @force_fp32(apply_to=('pred_maps', )) def get_bboxes(self, pred_maps, img_metas, cfg=None, rescale=False, with_nms=True): """Transform network output for a batch into bbox predictions. It has been accelerated since PR #5991. Args: pred_maps (list[Tensor]): Raw predictions for a batch of images. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. cfg (mmcv.Config | None): Test / postprocessing configuration, if None, test_cfg would be used. Default: None. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is an (n, 5) tensor, where 5 represent (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. The shape of the second tensor in the tuple is (n,), and each element represents the class label of the corresponding box. 
""" assert len(pred_maps) == self.num_levels cfg = self.test_cfg if cfg is None else cfg scale_factors = np.array( [img_meta['scale_factor'] for img_meta in img_metas]) num_imgs = len(img_metas) featmap_sizes = [pred_map.shape[-2:] for pred_map in pred_maps] mlvl_anchors = self.prior_generator.grid_priors( featmap_sizes, device=pred_maps[0].device) flatten_preds = [] flatten_strides = [] for pred, stride in zip(pred_maps, self.featmap_strides): pred = pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.num_attrib) pred[..., :2].sigmoid_() flatten_preds.append(pred) flatten_strides.append( pred.new_tensor(stride).expand(pred.size(1))) flatten_preds = torch.cat(flatten_preds, dim=1) flatten_bbox_preds = flatten_preds[..., :4] flatten_objectness = flatten_preds[..., 4].sigmoid() flatten_cls_scores = flatten_preds[..., 5:].sigmoid() flatten_anchors = torch.cat(mlvl_anchors) flatten_strides = torch.cat(flatten_strides) flatten_bboxes = self.bbox_coder.decode(flatten_anchors, flatten_bbox_preds, flatten_strides.unsqueeze(-1)) if with_nms and (flatten_objectness.size(0) == 0): return torch.zeros((0, 5)), torch.zeros((0, )) if rescale: flatten_bboxes /= flatten_bboxes.new_tensor( scale_factors).unsqueeze(1) padding = flatten_bboxes.new_zeros(num_imgs, flatten_bboxes.shape[1], 1) flatten_cls_scores = torch.cat([flatten_cls_scores, padding], dim=-1) det_results = [] for (bboxes, scores, objectness) in zip(flatten_bboxes, flatten_cls_scores, flatten_objectness): # Filtering out all predictions with conf < conf_thr conf_thr = cfg.get('conf_thr', -1) if conf_thr > 0: conf_inds = objectness >= conf_thr bboxes = bboxes[conf_inds, :] scores = scores[conf_inds, :] objectness = objectness[conf_inds] det_bboxes, det_labels = multiclass_nms( bboxes, scores, cfg.score_thr, cfg.nms, cfg.max_per_img, score_factors=objectness) det_results.append(tuple([det_bboxes, det_labels])) return det_results @force_fp32(apply_to=('pred_maps', )) def loss(self, pred_maps, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute loss of the head. Args: pred_maps (list[Tensor]): Prediction map for each scale level, shape (N, num_anchors * num_attrib, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ num_imgs = len(img_metas) device = pred_maps[0][0].device featmap_sizes = [ pred_maps[i].shape[-2:] for i in range(self.num_levels) ] mlvl_anchors = self.prior_generator.grid_priors( featmap_sizes, device=device) anchor_list = [mlvl_anchors for _ in range(num_imgs)] responsible_flag_list = [] for img_id in range(len(img_metas)): responsible_flag_list.append( self.prior_generator.responsible_flags(featmap_sizes, gt_bboxes[img_id], device)) target_maps_list, neg_maps_list = self.get_targets( anchor_list, responsible_flag_list, gt_bboxes, gt_labels) losses_cls, losses_conf, losses_xy, losses_wh = multi_apply( self.loss_single, pred_maps, target_maps_list, neg_maps_list) return dict( loss_cls=losses_cls, loss_conf=losses_conf, loss_xy=losses_xy, loss_wh=losses_wh) def loss_single(self, pred_map, target_map, neg_map): """Compute loss of a single image from a batch. 
Args: pred_map (Tensor): Raw predictions for a single level. target_map (Tensor): The Ground-Truth target for a single level. neg_map (Tensor): The negative masks for a single level. Returns: tuple: loss_cls (Tensor): Classification loss. loss_conf (Tensor): Confidence loss. loss_xy (Tensor): Regression loss of x, y coordinate. loss_wh (Tensor): Regression loss of w, h coordinate. """ num_imgs = len(pred_map) pred_map = pred_map.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.num_attrib) neg_mask = neg_map.float() pos_mask = target_map[..., 4] pos_and_neg_mask = neg_mask + pos_mask pos_mask = pos_mask.unsqueeze(dim=-1) if torch.max(pos_and_neg_mask) > 1.: warnings.warn('There is overlap between pos and neg sample.') pos_and_neg_mask = pos_and_neg_mask.clamp(min=0., max=1.) pred_xy = pred_map[..., :2] pred_wh = pred_map[..., 2:4] pred_conf = pred_map[..., 4] pred_label = pred_map[..., 5:] target_xy = target_map[..., :2] target_wh = target_map[..., 2:4] target_conf = target_map[..., 4] target_label = target_map[..., 5:] loss_cls = self.loss_cls(pred_label, target_label, weight=pos_mask) loss_conf = self.loss_conf( pred_conf, target_conf, weight=pos_and_neg_mask) loss_xy = self.loss_xy(pred_xy, target_xy, weight=pos_mask) loss_wh = self.loss_wh(pred_wh, target_wh, weight=pos_mask) return loss_cls, loss_conf, loss_xy, loss_wh def get_targets(self, anchor_list, responsible_flag_list, gt_bboxes_list, gt_labels_list): """Compute target maps for anchors in multiple images. Args: anchor_list (list[list[Tensor]]): Multi level anchors of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_total_anchors, 4). responsible_flag_list (list[list[Tensor]]): Multi level responsible flags of each image. Each element is a tensor of shape (num_total_anchors, ) gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. gt_labels_list (list[Tensor]): Ground truth labels of each box. Returns: tuple: Usually returns a tuple containing learning targets. - target_map_list (list[Tensor]): Target map of each level. - neg_map_list (list[Tensor]): Negative map of each level. """ num_imgs = len(anchor_list) # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] results = multi_apply(self._get_targets_single, anchor_list, responsible_flag_list, gt_bboxes_list, gt_labels_list) all_target_maps, all_neg_maps = results assert num_imgs == len(all_target_maps) == len(all_neg_maps) target_maps_list = images_to_levels(all_target_maps, num_level_anchors) neg_maps_list = images_to_levels(all_neg_maps, num_level_anchors) return target_maps_list, neg_maps_list def _get_targets_single(self, anchors, responsible_flags, gt_bboxes, gt_labels): """Generate matching bounding box prior and converted GT. Args: anchors (list[Tensor]): Multi-level anchors of the image. responsible_flags (list[Tensor]): Multi-level responsible flags of anchors gt_bboxes (Tensor): Ground truth bboxes of single image. gt_labels (Tensor): Ground truth labels of single image. 
Returns: tuple: target_map (Tensor): Predication target map of each scale level, shape (num_total_anchors, 5+num_classes) neg_map (Tensor): Negative map of each scale level, shape (num_total_anchors,) """ anchor_strides = [] for i in range(len(anchors)): anchor_strides.append( torch.tensor(self.featmap_strides[i], device=gt_bboxes.device).repeat(len(anchors[i]))) concat_anchors = torch.cat(anchors) concat_responsible_flags = torch.cat(responsible_flags) anchor_strides = torch.cat(anchor_strides) assert len(anchor_strides) == len(concat_anchors) == \ len(concat_responsible_flags) assign_result = self.assigner.assign(concat_anchors, concat_responsible_flags, gt_bboxes) sampling_result = self.sampler.sample(assign_result, concat_anchors, gt_bboxes) target_map = concat_anchors.new_zeros( concat_anchors.size(0), self.num_attrib) target_map[sampling_result.pos_inds, :4] = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes, anchor_strides[sampling_result.pos_inds]) target_map[sampling_result.pos_inds, 4] = 1 gt_labels_one_hot = F.one_hot( gt_labels, num_classes=self.num_classes).float() if self.one_hot_smoother != 0: # label smooth gt_labels_one_hot = gt_labels_one_hot * ( 1 - self.one_hot_smoother ) + self.one_hot_smoother / self.num_classes target_map[sampling_result.pos_inds, 5:] = gt_labels_one_hot[ sampling_result.pos_assigned_gt_inds] neg_map = concat_anchors.new_zeros( concat_anchors.size(0), dtype=torch.uint8) neg_map[sampling_result.neg_inds] = 1 return target_map, neg_map def aug_test(self, feats, img_metas, rescale=False): """Test function with test time augmentation. Args: feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. 
Returns: list[ndarray]: bbox results of each class """ return self.aug_test_bboxes(feats, img_metas, rescale=rescale) @force_fp32(apply_to=('pred_maps')) def onnx_export(self, pred_maps, img_metas, with_nms=True): num_levels = len(pred_maps) pred_maps_list = [pred_maps[i].detach() for i in range(num_levels)] cfg = self.test_cfg assert len(pred_maps_list) == self.num_levels device = pred_maps_list[0].device batch_size = pred_maps_list[0].shape[0] featmap_sizes = [ pred_maps_list[i].shape[-2:] for i in range(self.num_levels) ] mlvl_anchors = self.prior_generator.grid_priors( featmap_sizes, device=device) # convert to tensor to keep tracing nms_pre_tensor = torch.tensor( cfg.get('nms_pre', -1), device=device, dtype=torch.long) multi_lvl_bboxes = [] multi_lvl_cls_scores = [] multi_lvl_conf_scores = [] for i in range(self.num_levels): # get some key info for current scale pred_map = pred_maps_list[i] stride = self.featmap_strides[i] # (b,h, w, num_anchors*num_attrib) -> # (b,h*w*num_anchors, num_attrib) pred_map = pred_map.permute(0, 2, 3, 1).reshape(batch_size, -1, self.num_attrib) # Inplace operation like # ```pred_map[..., :2] = \torch.sigmoid(pred_map[..., :2])``` # would create constant tensor when exporting to onnx pred_map_conf = torch.sigmoid(pred_map[..., :2]) pred_map_rest = pred_map[..., 2:] pred_map = torch.cat([pred_map_conf, pred_map_rest], dim=-1) pred_map_boxes = pred_map[..., :4] multi_lvl_anchor = mlvl_anchors[i] multi_lvl_anchor = multi_lvl_anchor.expand_as(pred_map_boxes) bbox_pred = self.bbox_coder.decode(multi_lvl_anchor, pred_map_boxes, stride) # conf and cls conf_pred = torch.sigmoid(pred_map[..., 4]) cls_pred = torch.sigmoid(pred_map[..., 5:]).view( batch_size, -1, self.num_classes) # Cls pred one-hot. # Get top-k prediction from mmdet.core.export import get_k_for_topk nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1]) if nms_pre > 0: _, topk_inds = conf_pred.topk(nms_pre) batch_inds = torch.arange(batch_size).view( -1, 1).expand_as(topk_inds).long() # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 transformed_inds = ( bbox_pred.shape[1] * batch_inds + topk_inds) bbox_pred = bbox_pred.reshape(-1, 4)[transformed_inds, :].reshape( batch_size, -1, 4) cls_pred = cls_pred.reshape( -1, self.num_classes)[transformed_inds, :].reshape( batch_size, -1, self.num_classes) conf_pred = conf_pred.reshape(-1, 1)[transformed_inds].reshape( batch_size, -1) # Save the result of current scale multi_lvl_bboxes.append(bbox_pred) multi_lvl_cls_scores.append(cls_pred) multi_lvl_conf_scores.append(conf_pred) # Merge the results of different scales together batch_mlvl_bboxes = torch.cat(multi_lvl_bboxes, dim=1) batch_mlvl_scores = torch.cat(multi_lvl_cls_scores, dim=1) batch_mlvl_conf_scores = torch.cat(multi_lvl_conf_scores, dim=1) # Replace multiclass_nms with ONNX::NonMaxSuppression in deployment from mmdet.core.export import add_dummy_nms_for_onnx conf_thr = cfg.get('conf_thr', -1) score_thr = cfg.get('score_thr', -1) # follow original pipeline of YOLOv3 if conf_thr > 0: mask = (batch_mlvl_conf_scores >= conf_thr).float() batch_mlvl_conf_scores *= mask if score_thr > 0: mask = (batch_mlvl_scores > score_thr).float() batch_mlvl_scores *= mask batch_mlvl_conf_scores = batch_mlvl_conf_scores.unsqueeze(2).expand_as( batch_mlvl_scores) batch_mlvl_scores = batch_mlvl_scores * batch_mlvl_conf_scores if with_nms: max_output_boxes_per_class = cfg.nms.get( 'max_output_boxes_per_class', 200) iou_threshold = cfg.nms.get('iou_threshold', 0.5) # keep 
aligned with original pipeline, improve
            # mAP by 1% for YOLOv3 in ONNX
            score_threshold = 0
            nms_pre = cfg.get('deploy_nms_pre', -1)
            return add_dummy_nms_for_onnx(
                batch_mlvl_bboxes,
                batch_mlvl_scores,
                max_output_boxes_per_class,
                iou_threshold,
                score_threshold,
                nms_pre,
                cfg.max_per_img,
            )
        else:
            return batch_mlvl_bboxes, batch_mlvl_scores


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/yolof_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import (ConvModule, bias_init_with_prob, constant_init, is_norm,
                      normal_init)
from mmcv.runner import force_fp32

from mmdet.core import anchor_inside_flags, multi_apply, reduce_mean, unmap
from ..builder import HEADS
from .anchor_head import AnchorHead

INF = 1e8


def levels_to_images(mlvl_tensor):
    """Concat multi-level feature maps by image.

    [feature_level0, feature_level1...] -> [feature_image0, feature_image1...]
    Convert the shape of each element in mlvl_tensor from (N, C, H, W) to
    (N, H*W, C), then split the element to N elements with shape (H*W, C), and
    concat elements in same image of all levels along first dimension.

    Args:
        mlvl_tensor (list[torch.Tensor]): list of Tensor which collect from
            corresponding level. Each element is of shape (N, C, H, W)

    Returns:
        list[torch.Tensor]: A list that contains N tensors and each tensor is
            of shape (num_elements, C)
    """
    batch_size = mlvl_tensor[0].size(0)
    batch_list = [[] for _ in range(batch_size)]
    channels = mlvl_tensor[0].size(1)
    for t in mlvl_tensor:
        t = t.permute(0, 2, 3, 1)
        t = t.view(batch_size, -1, channels).contiguous()
        for img in range(batch_size):
            batch_list[img].append(t[img])
    return [torch.cat(item, 0) for item in batch_list]


@HEADS.register_module()
class YOLOFHead(AnchorHead):
    """YOLOFHead Paper link: https://arxiv.org/abs/2103.09460.

    Args:
        num_classes (int): The number of object classes (w/o background)
        in_channels (List[int]): The number of input channels per scale.
        num_cls_convs (int): The number of convolutions of cls branch.
            Default 2.
        num_reg_convs (int): The number of convolutions of reg branch.
            Default 4.
        norm_cfg (dict): Dictionary to construct and config norm layer.
""" def __init__(self, num_classes, in_channels, num_cls_convs=2, num_reg_convs=4, norm_cfg=dict(type='BN', requires_grad=True), **kwargs): self.num_cls_convs = num_cls_convs self.num_reg_convs = num_reg_convs self.norm_cfg = norm_cfg super(YOLOFHead, self).__init__(num_classes, in_channels, **kwargs) def _init_layers(self): cls_subnet = [] bbox_subnet = [] for i in range(self.num_cls_convs): cls_subnet.append( ConvModule( self.in_channels, self.in_channels, kernel_size=3, padding=1, norm_cfg=self.norm_cfg)) for i in range(self.num_reg_convs): bbox_subnet.append( ConvModule( self.in_channels, self.in_channels, kernel_size=3, padding=1, norm_cfg=self.norm_cfg)) self.cls_subnet = nn.Sequential(*cls_subnet) self.bbox_subnet = nn.Sequential(*bbox_subnet) self.cls_score = nn.Conv2d( self.in_channels, self.num_base_priors * self.num_classes, kernel_size=3, stride=1, padding=1) self.bbox_pred = nn.Conv2d( self.in_channels, self.num_base_priors * 4, kernel_size=3, stride=1, padding=1) self.object_pred = nn.Conv2d( self.in_channels, self.num_base_priors, kernel_size=3, stride=1, padding=1) def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): normal_init(m, mean=0, std=0.01) if is_norm(m): constant_init(m, 1) # Use prior in model initialization to improve stability bias_cls = bias_init_with_prob(0.01) torch.nn.init.constant_(self.cls_score.bias, bias_cls) def forward_single(self, feature): cls_score = self.cls_score(self.cls_subnet(feature)) N, _, H, W = cls_score.shape cls_score = cls_score.view(N, -1, self.num_classes, H, W) reg_feat = self.bbox_subnet(feature) bbox_reg = self.bbox_pred(reg_feat) objectness = self.object_pred(reg_feat) # implicit objectness objectness = objectness.view(N, -1, 1, H, W) normalized_cls_score = cls_score + objectness - torch.log( 1. + torch.clamp(cls_score.exp(), max=INF) + torch.clamp(objectness.exp(), max=INF)) normalized_cls_score = normalized_cls_score.view(N, -1, H, W) return normalized_cls_score, bbox_reg @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (batch, num_anchors * num_classes, h, w) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (batch, num_anchors * 4, h, w) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Default: None Returns: dict[str, Tensor]: A dictionary of loss components. 
""" assert len(cls_scores) == 1 assert self.prior_generator.num_levels == 1 device = cls_scores[0].device featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) # The output level is always 1 anchor_list = [anchors[0] for anchors in anchor_list] valid_flag_list = [valid_flags[0] for valid_flags in valid_flag_list] cls_scores_list = levels_to_images(cls_scores) bbox_preds_list = levels_to_images(bbox_preds) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( cls_scores_list, bbox_preds_list, anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (batch_labels, batch_label_weights, num_total_pos, num_total_neg, batch_bbox_weights, batch_pos_predicted_boxes, batch_target_boxes) = cls_reg_targets flatten_labels = batch_labels.reshape(-1) batch_label_weights = batch_label_weights.reshape(-1) cls_score = cls_scores[0].permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) num_total_samples = (num_total_pos + num_total_neg) if self.sampling else num_total_pos num_total_samples = reduce_mean( cls_score.new_tensor(num_total_samples)).clamp_(1.0).item() # classification loss loss_cls = self.loss_cls( cls_score, flatten_labels, batch_label_weights, avg_factor=num_total_samples) # regression loss if batch_pos_predicted_boxes.shape[0] == 0: # no pos sample loss_bbox = batch_pos_predicted_boxes.sum() * 0 else: loss_bbox = self.loss_bbox( batch_pos_predicted_boxes, batch_target_boxes, batch_bbox_weights.float(), avg_factor=num_total_samples) return dict(loss_cls=loss_cls, loss_bbox=loss_bbox) def get_targets(self, cls_scores_list, bbox_preds_list, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True): """Compute regression and classification targets for anchors in multiple images. Args: cls_scores_list (list[Tensor]): Classification scores of each image. each is a 4D-tensor, the shape is (h * w, num_anchors * num_classes). bbox_preds_list (list[Tensor]): Bbox preds of each image. each is a 4D-tensor, the shape is (h * w, num_anchors * 4). anchor_list (list[Tensor]): Anchors of each image. Each element of is a tensor of shape (h * w * num_anchors, 4). valid_flag_list (list[Tensor]): Valid flags of each image. Each element of is a tensor of shape (h * w * num_anchors, ) gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be ignored. gt_labels_list (list[Tensor]): Ground truth labels of each box. label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: Usually returns a tuple containing learning targets. - batch_labels (Tensor): Label of all images. Each element \ of is a tensor of shape (batch, h * w * num_anchors) - batch_label_weights (Tensor): Label weights of all images \ of is a tensor of shape (batch, h * w * num_anchors) - num_total_pos (int): Number of positive samples in all \ images. - num_total_neg (int): Number of negative samples in all \ images. additional_returns: This function enables user-defined returns from `self._get_targets_single`. These returns are currently refined to properties at each feature map (i.e. 
having HxW dimension). The results will be concatenated after the end """ num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] results = multi_apply( self._get_targets_single, bbox_preds_list, anchor_list, valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs) (all_labels, all_label_weights, pos_inds_list, neg_inds_list, sampling_results_list) = results[:5] rest_results = list(results[5:]) # user-added return values # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) batch_labels = torch.stack(all_labels, 0) batch_label_weights = torch.stack(all_label_weights, 0) res = (batch_labels, batch_label_weights, num_total_pos, num_total_neg) for i, rests in enumerate(rest_results): # user-added return values rest_results[i] = torch.cat(rests, 0) return res + tuple(rest_results) def _get_targets_single(self, bbox_preds, flat_anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True): """Compute regression and classification targets for anchors in a single image. Args: bbox_preds (Tensor): Bbox prediction of the image, which shape is (h * w ,4) flat_anchors (Tensor): Anchors of the image, which shape is (h * w * num_anchors ,4) valid_flags (Tensor): Valid flags of the image, which shape is (h * w * num_anchors,). gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). img_meta (dict): Meta info of the image. gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: labels (Tensor): Labels of image, which shape is (h * w * num_anchors, ). label_weights (Tensor): Label weights of image, which shape is (h * w * num_anchors, ). pos_inds (Tensor): Pos index of image. neg_inds (Tensor): Neg index of image. sampling_result (obj:`SamplingResult`): Sampling result. pos_bbox_weights (Tensor): The Weight of using to calculate the bbox branch loss, which shape is (num, ). pos_predicted_boxes (Tensor): boxes predicted value of using to calculate the bbox branch loss, which shape is (num, 4). pos_target_boxes (Tensor): boxes target value of using to calculate the bbox branch loss, which shape is (num, 4). 
""" inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) if not inside_flags.any(): return (None, ) * 8 # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] bbox_preds = bbox_preds.reshape(-1, 4) bbox_preds = bbox_preds[inside_flags, :] # decoded bbox decoder_bbox_preds = self.bbox_coder.decode(anchors, bbox_preds) assign_result = self.assigner.assign( decoder_bbox_preds, anchors, gt_bboxes, gt_bboxes_ignore, None if self.sampling else gt_labels) pos_bbox_weights = assign_result.get_extra_property('pos_idx') pos_predicted_boxes = assign_result.get_extra_property( 'pos_predicted_boxes') pos_target_boxes = assign_result.get_extra_property('target_boxes') sampling_result = self.sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class since v2.5.0 labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) # fill bg label label_weights = unmap(label_weights, num_total_anchors, inside_flags) return (labels, label_weights, pos_inds, neg_inds, sampling_result, pos_bbox_weights, pos_predicted_boxes, pos_target_boxes) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/yolox_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, bias_init_with_prob) from mmcv.ops.nms import batched_nms from mmcv.runner import force_fp32 from mmdet.core import (MlvlPointGenerator, bbox_xyxy_to_cxcywh, build_assigner, build_sampler, multi_apply, reduce_mean) from ..builder import HEADS, build_loss from .base_dense_head import BaseDenseHead from .dense_test_mixins import BBoxTestMixin @HEADS.register_module() class YOLOXHead(BaseDenseHead, BBoxTestMixin): """YOLOXHead head used in `YOLOX `_. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels in stacking convs. Default: 256 stacked_convs (int): Number of stacking convs of the head. Default: 2. strides (tuple): Downsample factor of each feature map. use_depthwise (bool): Whether to depthwise separable convolution in blocks. Default: False dcn_on_last_conv (bool): If true, use dcn in the last layer of towers. Default: False. conv_bias (bool | str): If specified as `auto`, it will be decided by the norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise False. Default: "auto". conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Config dict for normalization layer. Default: None. 
act_cfg (dict): Config dict for activation layer. Default: None. loss_cls (dict): Config of classification loss. loss_bbox (dict): Config of localization loss. loss_obj (dict): Config of objectness loss. loss_l1 (dict): Config of L1 loss. train_cfg (dict): Training config of anchor head. test_cfg (dict): Testing config of anchor head. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, num_classes, in_channels, feat_channels=256, stacked_convs=2, strides=[8, 16, 32], use_depthwise=False, dcn_on_last_conv=False, conv_bias='auto', conv_cfg=None, norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish'), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, reduction='sum', loss_weight=1.0), loss_bbox=dict( type='IoULoss', mode='square', eps=1e-16, reduction='sum', loss_weight=5.0), loss_obj=dict( type='CrossEntropyLoss', use_sigmoid=True, reduction='sum', loss_weight=1.0), loss_l1=dict(type='L1Loss', reduction='sum', loss_weight=1.0), train_cfg=None, test_cfg=None, init_cfg=dict( type='Kaiming', layer='Conv2d', a=math.sqrt(5), distribution='uniform', mode='fan_in', nonlinearity='leaky_relu')): super().__init__(init_cfg=init_cfg) self.num_classes = num_classes self.cls_out_channels = num_classes self.in_channels = in_channels self.feat_channels = feat_channels self.stacked_convs = stacked_convs self.strides = strides self.use_depthwise = use_depthwise self.dcn_on_last_conv = dcn_on_last_conv assert conv_bias == 'auto' or isinstance(conv_bias, bool) self.conv_bias = conv_bias self.use_sigmoid_cls = True self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) self.loss_obj = build_loss(loss_obj) self.use_l1 = False # This flag will be modified by hooks. 
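        # Editor's note (added): in the reference YOLOX schedule this flag is
        # flipped to True for the last epochs of training (together with
        # disabling strong augmentation) by a training hook, e.g. roughly:
        #
        #     if runner.epoch >= runner.max_epochs - num_last_epochs:
        #         model.bbox_head.use_l1 = True
        #
        # so the extra L1 term in ``loss`` only contributes late in training.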
self.loss_l1 = build_loss(loss_l1) self.prior_generator = MlvlPointGenerator(strides, offset=0) self.test_cfg = test_cfg self.train_cfg = train_cfg self.sampling = False if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # sampling=False so use PseudoSampler sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.fp16_enabled = False self._init_layers() def _init_layers(self): self.multi_level_cls_convs = nn.ModuleList() self.multi_level_reg_convs = nn.ModuleList() self.multi_level_conv_cls = nn.ModuleList() self.multi_level_conv_reg = nn.ModuleList() self.multi_level_conv_obj = nn.ModuleList() for _ in self.strides: self.multi_level_cls_convs.append(self._build_stacked_convs()) self.multi_level_reg_convs.append(self._build_stacked_convs()) conv_cls, conv_reg, conv_obj = self._build_predictor() self.multi_level_conv_cls.append(conv_cls) self.multi_level_conv_reg.append(conv_reg) self.multi_level_conv_obj.append(conv_obj) def _build_stacked_convs(self): """Initialize conv layers of a single level head.""" conv = DepthwiseSeparableConvModule \ if self.use_depthwise else ConvModule stacked_convs = [] for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels if self.dcn_on_last_conv and i == self.stacked_convs - 1: conv_cfg = dict(type='DCNv2') else: conv_cfg = self.conv_cfg stacked_convs.append( conv( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, bias=self.conv_bias)) return nn.Sequential(*stacked_convs) def _build_predictor(self): """Initialize predictor layers of a single level head.""" conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1) conv_reg = nn.Conv2d(self.feat_channels, 4, 1) conv_obj = nn.Conv2d(self.feat_channels, 1, 1) return conv_cls, conv_reg, conv_obj def init_weights(self): super(YOLOXHead, self).init_weights() # Use prior in model initialization to improve stability bias_init = bias_init_with_prob(0.01) for conv_cls, conv_obj in zip(self.multi_level_conv_cls, self.multi_level_conv_obj): conv_cls.bias.data.fill_(bias_init) conv_obj.bias.data.fill_(bias_init) def forward_single(self, x, cls_convs, reg_convs, conv_cls, conv_reg, conv_obj): """Forward feature of a single scale level.""" cls_feat = cls_convs(x) reg_feat = reg_convs(x) cls_score = conv_cls(cls_feat) bbox_pred = conv_reg(reg_feat) objectness = conv_obj(reg_feat) return cls_score, bbox_pred, objectness def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple[Tensor]: A tuple of multi-level predication map, each is a 4D-tensor of shape (batch_size, 5+num_classes, height, width). """ return multi_apply(self.forward_single, feats, self.multi_level_cls_convs, self.multi_level_reg_convs, self.multi_level_conv_cls, self.multi_level_conv_reg, self.multi_level_conv_obj) @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'objectnesses')) def get_bboxes(self, cls_scores, bbox_preds, objectnesses, img_metas=None, cfg=None, rescale=False, with_nms=True): """Transform network outputs of a batch into bbox results. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). 
objectnesses (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, 1, H, W). img_metas (list[dict], Optional): Image meta info. Default None. cfg (mmcv.Config, Optional): Test / postprocessing configuration, if None, test_cfg would be used. Default None. rescale (bool): If True, return boxes in original image space. Default False. with_nms (bool): If True, do nms before return boxes. Default True. Returns: list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is an (n, 5) tensor, where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. The second item is a (n,) tensor where each item is the predicted class label of the corresponding box. """ assert len(cls_scores) == len(bbox_preds) == len(objectnesses) cfg = self.test_cfg if cfg is None else cfg scale_factors = np.array( [img_meta['scale_factor'] for img_meta in img_metas]) num_imgs = len(img_metas) featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, dtype=cls_scores[0].dtype, device=cls_scores[0].device, with_stride=True) # flatten cls_scores, bbox_preds and objectness flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) for bbox_pred in bbox_preds ] flatten_objectness = [ objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) for objectness in objectnesses ] flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid() flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid() flatten_priors = torch.cat(mlvl_priors) flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds) if rescale: flatten_bboxes[..., :4] /= flatten_bboxes.new_tensor( scale_factors).unsqueeze(1) result_list = [] for img_id in range(len(img_metas)): cls_scores = flatten_cls_scores[img_id] score_factor = flatten_objectness[img_id] bboxes = flatten_bboxes[img_id] result_list.append( self._bboxes_nms(cls_scores, bboxes, score_factor, cfg)) return result_list def _bbox_decode(self, priors, bbox_preds): xys = (bbox_preds[..., :2] * priors[:, 2:]) + priors[:, :2] whs = bbox_preds[..., 2:].exp() * priors[:, 2:] tl_x = (xys[..., 0] - whs[..., 0] / 2) tl_y = (xys[..., 1] - whs[..., 1] / 2) br_x = (xys[..., 0] + whs[..., 0] / 2) br_y = (xys[..., 1] + whs[..., 1] / 2) decoded_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1) return decoded_bboxes def _bboxes_nms(self, cls_scores, bboxes, score_factor, cfg): max_scores, labels = torch.max(cls_scores, 1) valid_mask = score_factor * max_scores >= cfg.score_thr bboxes = bboxes[valid_mask] scores = max_scores[valid_mask] * score_factor[valid_mask] labels = labels[valid_mask] if labels.numel() == 0: return bboxes, labels else: dets, keep = batched_nms(bboxes, scores, labels, cfg.nms) return dets, labels[keep] @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'objectnesses')) def loss(self, cls_scores, bbox_preds, objectnesses, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute loss of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_priors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_priors * 4. 
objectnesses (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, 1, H, W). gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. """ num_imgs = len(img_metas) featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, dtype=cls_scores[0].dtype, device=cls_scores[0].device, with_stride=True) flatten_cls_preds = [ cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.cls_out_channels) for cls_pred in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) for bbox_pred in bbox_preds ] flatten_objectness = [ objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) for objectness in objectnesses ] flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1) flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) flatten_objectness = torch.cat(flatten_objectness, dim=1) flatten_priors = torch.cat(mlvl_priors) flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds) (pos_masks, cls_targets, obj_targets, bbox_targets, l1_targets, num_fg_imgs) = multi_apply( self._get_target_single, flatten_cls_preds.detach(), flatten_objectness.detach(), flatten_priors.unsqueeze(0).repeat(num_imgs, 1, 1), flatten_bboxes.detach(), gt_bboxes, gt_labels) # The experimental results show that 'reduce_mean' can improve # performance on the COCO dataset. num_pos = torch.tensor( sum(num_fg_imgs), dtype=torch.float, device=flatten_cls_preds.device) num_total_samples = max(reduce_mean(num_pos), 1.0) pos_masks = torch.cat(pos_masks, 0) cls_targets = torch.cat(cls_targets, 0) obj_targets = torch.cat(obj_targets, 0) bbox_targets = torch.cat(bbox_targets, 0) if self.use_l1: l1_targets = torch.cat(l1_targets, 0) loss_bbox = self.loss_bbox( flatten_bboxes.view(-1, 4)[pos_masks], bbox_targets) / num_total_samples loss_obj = self.loss_obj(flatten_objectness.view(-1, 1), obj_targets) / num_total_samples loss_cls = self.loss_cls( flatten_cls_preds.view(-1, self.num_classes)[pos_masks], cls_targets) / num_total_samples loss_dict = dict( loss_cls=loss_cls, loss_bbox=loss_bbox, loss_obj=loss_obj) if self.use_l1: loss_l1 = self.loss_l1( flatten_bbox_preds.view(-1, 4)[pos_masks], l1_targets) / num_total_samples loss_dict.update(loss_l1=loss_l1) return loss_dict @torch.no_grad() def _get_target_single(self, cls_preds, objectness, priors, decoded_bboxes, gt_bboxes, gt_labels): """Compute classification, regression, and objectness targets for priors in a single image. Args: cls_preds (Tensor): Classification predictions of one image, a 2D-Tensor with shape [num_priors, num_classes] objectness (Tensor): Objectness predictions of one image, a 1D-Tensor with shape [num_priors] priors (Tensor): All priors of one image, a 2D-Tensor with shape [num_priors, 4] in [cx, cy, stride_w, stride_h] format. decoded_bboxes (Tensor): Decoded bboxes predictions of one image, a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y, br_x, br_y] format. gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format. gt_labels (Tensor): Ground truth labels of one image, a Tensor with shape [num_gts].
""" num_priors = priors.size(0) num_gts = gt_labels.size(0) gt_bboxes = gt_bboxes.to(decoded_bboxes.dtype) # No target if num_gts == 0: cls_target = cls_preds.new_zeros((0, self.num_classes)) bbox_target = cls_preds.new_zeros((0, 4)) l1_target = cls_preds.new_zeros((0, 4)) obj_target = cls_preds.new_zeros((num_priors, 1)) foreground_mask = cls_preds.new_zeros(num_priors).bool() return (foreground_mask, cls_target, obj_target, bbox_target, l1_target, 0) # YOLOX uses center priors with 0.5 offset to assign targets, # but use center priors without offset to regress bboxes. offset_priors = torch.cat( [priors[:, :2] + priors[:, 2:] * 0.5, priors[:, 2:]], dim=-1) assign_result = self.assigner.assign( cls_preds.sigmoid() * objectness.unsqueeze(1).sigmoid(), offset_priors, decoded_bboxes, gt_bboxes, gt_labels) sampling_result = self.sampler.sample(assign_result, priors, gt_bboxes) pos_inds = sampling_result.pos_inds num_pos_per_img = pos_inds.size(0) pos_ious = assign_result.max_overlaps[pos_inds] # IOU aware classification score cls_target = F.one_hot(sampling_result.pos_gt_labels, self.num_classes) * pos_ious.unsqueeze(-1) obj_target = torch.zeros_like(objectness).unsqueeze(-1) obj_target[pos_inds] = 1 bbox_target = sampling_result.pos_gt_bboxes l1_target = cls_preds.new_zeros((num_pos_per_img, 4)) if self.use_l1: l1_target = self._get_l1_target(l1_target, bbox_target, priors[pos_inds]) foreground_mask = torch.zeros_like(objectness).to(torch.bool) foreground_mask[pos_inds] = 1 return (foreground_mask, cls_target, obj_target, bbox_target, l1_target, num_pos_per_img) def _get_l1_target(self, l1_target, gt_bboxes, priors, eps=1e-8): """Convert gt bboxes to center offset and log width height.""" gt_cxcywh = bbox_xyxy_to_cxcywh(gt_bboxes) l1_target[:, :2] = (gt_cxcywh[:, :2] - priors[:, :2]) / priors[:, 2:] l1_target[:, 2:] = torch.log(gt_cxcywh[:, 2:] / priors[:, 2:] + eps) return l1_target ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
from .atss import ATSS from .autoassign import AutoAssign from .base import BaseDetector from .cascade_rcnn import CascadeRCNN from .centernet import CenterNet from .cornernet import CornerNet from .ddod import DDOD from .deformable_detr import DeformableDETR from .detr import DETR from .fast_rcnn import FastRCNN from .faster_rcnn import FasterRCNN from .fcos import FCOS from .fovea import FOVEA from .fsaf import FSAF from .gfl import GFL from .grid_rcnn import GridRCNN from .htc import HybridTaskCascade from .kd_one_stage import KnowledgeDistillationSingleStageDetector from .lad import LAD from .mask2former import Mask2Former from .mask_rcnn import MaskRCNN from .mask_scoring_rcnn import MaskScoringRCNN from .maskformer import MaskFormer from .nasfcos import NASFCOS from .paa import PAA from .panoptic_fpn import PanopticFPN from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor from .point_rend import PointRend from .queryinst import QueryInst from .reppoints_detector import RepPointsDetector from .retinanet import RetinaNet from .rpn import RPN from .scnet import SCNet from .single_stage import SingleStageDetector from .solo import SOLO from .solov2 import SOLOv2 from .sparse_rcnn import SparseRCNN from .tood import TOOD from .trident_faster_rcnn import TridentFasterRCNN from .two_stage import TwoStageDetector from .vfnet import VFNet from .yolact import YOLACT from .yolo import YOLOV3 from .yolof import YOLOF from .yolox import YOLOX __all__ = [ 'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN', 'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS', 'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF', 'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT', 'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO', 'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX', 'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD', 'MaskFormer', 'DDOD', 'Mask2Former' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/atss.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class ATSS(SingleStageDetector): """Implementation of `ATSS `_.""" def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/autoassign.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class AutoAssign(SingleStageDetector): """Implementation of `AutoAssign: Differentiable Label Assignment for Dense Object Detection `_.""" def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None): super(AutoAssign, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/base.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
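# NOTE: ``BaseDetector.forward`` below dispatches on ``return_loss``: training
# uses single-nested inputs (Tensor, list[dict]), while testing expects
# double-nested inputs (list[Tensor], list[list[dict]]) whose outer lists
# enumerate test-time augmentations. A hedged sketch of a test-mode call with
# a single augmentation, assuming ``detector`` is any built subclass (the
# meta keys shown are a minimal illustration; concrete heads may read more):
#
#     import torch
#
#     img = torch.randn(1, 3, 800, 800)
#     img_meta = dict(img_shape=(800, 800, 3), ori_shape=(800, 800, 3),
#                     scale_factor=1.0, flip=False)
#     results = detector([img], [[img_meta]], return_loss=False)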
from abc import ABCMeta, abstractmethod from collections import OrderedDict import mmcv import numpy as np import torch import torch.distributed as dist from mmcv.runner import BaseModule, auto_fp16 from mmdet.core.visualization import imshow_det_bboxes class BaseDetector(BaseModule, metaclass=ABCMeta): """Base class for detectors.""" def __init__(self, init_cfg=None): super(BaseDetector, self).__init__(init_cfg) self.fp16_enabled = False @property def with_neck(self): """bool: whether the detector has a neck""" return hasattr(self, 'neck') and self.neck is not None # TODO: these properties need to be carefully handled # for both single stage & two stage detectors @property def with_shared_head(self): """bool: whether the detector has a shared head in the RoI Head""" return hasattr(self, 'roi_head') and self.roi_head.with_shared_head @property def with_bbox(self): """bool: whether the detector has a bbox head""" return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox) or (hasattr(self, 'bbox_head') and self.bbox_head is not None)) @property def with_mask(self): """bool: whether the detector has a mask head""" return ((hasattr(self, 'roi_head') and self.roi_head.with_mask) or (hasattr(self, 'mask_head') and self.mask_head is not None)) @abstractmethod def extract_feat(self, imgs): """Extract features from images.""" pass def extract_feats(self, imgs): """Extract features from multiple images. Args: imgs (list[torch.Tensor]): A list of images. The images are augmented from the same image but in different ways. Returns: list[torch.Tensor]: Features of different images """ assert isinstance(imgs, list) return [self.extract_feat(img) for img in imgs] def forward_train(self, imgs, img_metas, **kwargs): """ Args: img (Tensor): of shape (N, C, H, W) encoding input images. Typically these should be mean centered and std scaled. img_metas (list[dict]): List of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys, see :class:`mmdet.datasets.pipelines.Collect`. kwargs (keyword arguments): Specific to concrete implementation. """ # NOTE the batched image size information may be useful, e.g. # in DETR, this is needed for the construction of masks, which is # then used for the transformer_head. 
batch_input_shape = tuple(imgs[0].size()[-2:]) for img_meta in img_metas: img_meta['batch_input_shape'] = batch_input_shape async def async_simple_test(self, img, img_metas, **kwargs): raise NotImplementedError @abstractmethod def simple_test(self, img, img_metas, **kwargs): pass @abstractmethod def aug_test(self, imgs, img_metas, **kwargs): """Test function with test time augmentation.""" pass async def aforward_test(self, *, img, img_metas, **kwargs): for var, name in [(img, 'img'), (img_metas, 'img_metas')]: if not isinstance(var, list): raise TypeError(f'{name} must be a list, but got {type(var)}') num_augs = len(img) if num_augs != len(img_metas): raise ValueError(f'num of augmentations ({len(img)}) ' f'!= num of image metas ({len(img_metas)})') # TODO: remove the restriction of samples_per_gpu == 1 when prepared samples_per_gpu = img[0].size(0) assert samples_per_gpu == 1 if num_augs == 1: return await self.async_simple_test(img[0], img_metas[0], **kwargs) else: raise NotImplementedError def forward_test(self, imgs, img_metas, **kwargs): """ Args: imgs (List[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains all images in the batch. img_metas (List[List[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. """ for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: if not isinstance(var, list): raise TypeError(f'{name} must be a list, but got {type(var)}') num_augs = len(imgs) if num_augs != len(img_metas): raise ValueError(f'num of augmentations ({len(imgs)}) ' f'!= num of image meta ({len(img_metas)})') # NOTE the batched image size information may be useful, e.g. # in DETR, this is needed for the construction of masks, which is # then used for the transformer_head. for img, img_meta in zip(imgs, img_metas): batch_size = len(img_meta) for img_id in range(batch_size): img_meta[img_id]['batch_input_shape'] = tuple(img.size()[-2:]) if num_augs == 1: # proposals (List[List[Tensor]]): the outer list indicates # test-time augs (multiscale, flip, etc.) and the inner list # indicates images in a batch. # The Tensor should have a shape Px4, where P is the number of # proposals. if 'proposals' in kwargs: kwargs['proposals'] = kwargs['proposals'][0] return self.simple_test(imgs[0], img_metas[0], **kwargs) else: assert imgs[0].size(0) == 1, 'aug test does not support ' \ 'inference with batch size ' \ f'{imgs[0].size(0)}' # TODO: support test augmentation for predefined proposals assert 'proposals' not in kwargs return self.aug_test(imgs, img_metas, **kwargs) @auto_fp16(apply_to=('img', )) def forward(self, img, img_metas, return_loss=True, **kwargs): """Calls either :func:`forward_train` or :func:`forward_test` depending on whether ``return_loss`` is ``True``. Note this setting will change the expected inputs. When ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor and List[dict]), and when ``return_loss=False``, img and img_meta should be double nested (i.e. List[Tensor], List[List[dict]]), with the outer list indicating test time augmentations. """ if torch.onnx.is_in_onnx_export(): assert len(img_metas) == 1 return self.onnx_export(img[0], img_metas[0]) if return_loss: return self.forward_train(img, img_metas, **kwargs) else: return self.forward_test(img, img_metas, **kwargs) def _parse_losses(self, losses): """Parse the raw outputs (losses) of the network.
Args: losses (dict): Raw output of the network, which usually contain losses and other necessary information. Returns: tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \ which may be a weighted sum of all losses, log_vars contains \ all the variables to be sent to the logger. """ log_vars = OrderedDict() for loss_name, loss_value in losses.items(): if isinstance(loss_value, torch.Tensor): log_vars[loss_name] = loss_value.mean() elif isinstance(loss_value, list): log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) else: raise TypeError( f'{loss_name} is not a tensor or list of tensors') loss = sum(_value for _key, _value in log_vars.items() if 'loss' in _key) # If the loss_vars has different length, GPUs will wait infinitely if dist.is_available() and dist.is_initialized(): log_var_length = torch.tensor(len(log_vars), device=loss.device) dist.all_reduce(log_var_length) message = (f'rank {dist.get_rank()}' + f' len(log_vars): {len(log_vars)}' + ' keys: ' + ','.join(log_vars.keys())) assert log_var_length == len(log_vars) * dist.get_world_size(), \ 'loss log variables are different across GPUs!\n' + message log_vars['loss'] = loss for loss_name, loss_value in log_vars.items(): # reduce loss when distributed training if dist.is_available() and dist.is_initialized(): loss_value = loss_value.data.clone() dist.all_reduce(loss_value.div_(dist.get_world_size())) log_vars[loss_name] = loss_value.item() return loss, log_vars def train_step(self, data, optimizer): """The iteration step during training. This method defines an iteration step during training, except for the back propagation and optimizer updating, which are done in an optimizer hook. Note that in some complicated cases or models, the whole process including back propagation and optimizer updating is also defined in this method, such as GAN. Args: data (dict): The output of dataloader. optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of runner is passed to ``train_step()``. This argument is unused and reserved. Returns: dict: It should contain at least 3 keys: ``loss``, ``log_vars``, \ ``num_samples``. - ``loss`` is a tensor for back propagation, which can be a weighted sum of multiple losses. - ``log_vars`` contains all the variables to be sent to the logger. - ``num_samples`` indicates the batch size (when the model is DDP, it means the batch size on each GPU), which is used for averaging the logs. """ losses = self(**data) loss, log_vars = self._parse_losses(losses) outputs = dict( loss=loss, log_vars=log_vars, num_samples=len(data['img_metas'])) return outputs def val_step(self, data, optimizer=None): """The iteration step during validation. This method shares the same signature as :func:`train_step`, but used during val epochs. Note that the evaluation after training epochs is not implemented with this method, but an evaluation hook. """ losses = self(**data) loss, log_vars = self._parse_losses(losses) outputs = dict( loss=loss, log_vars=log_vars, num_samples=len(data['img_metas'])) return outputs def show_result(self, img, result, score_thr=0.3, bbox_color=(72, 101, 241), text_color=(72, 101, 241), mask_color=None, thickness=2, font_size=13, win_name='', show=False, wait_time=0, out_file=None): """Draw `result` over `img`. Args: img (str or Tensor): The image to be displayed. result (Tensor or tuple): The results to draw over `img` bbox_result or (bbox_result, segm_result). score_thr (float, optional): Minimum score of bboxes to be shown. Default: 0.3. 
bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. The tuple of color should be in BGR order. Default: 'green' text_color (str or tuple(int) or :obj:`Color`):Color of texts. The tuple of color should be in BGR order. Default: 'green' mask_color (None or str or tuple(int) or :obj:`Color`): Color of masks. The tuple of color should be in BGR order. Default: None thickness (int): Thickness of lines. Default: 2 font_size (int): Font size of texts. Default: 13 win_name (str): The window name. Default: '' wait_time (float): Value of waitKey param. Default: 0. show (bool): Whether to show the image. Default: False. out_file (str or None): The filename to write the image. Default: None. Returns: img (Tensor): Only if not `show` or `out_file` """ img = mmcv.imread(img) img = img.copy() if isinstance(result, tuple): bbox_result, segm_result = result if isinstance(segm_result, tuple): segm_result = segm_result[0] # ms rcnn else: bbox_result, segm_result = result, None bboxes = np.vstack(bbox_result) labels = [ np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result) ] labels = np.concatenate(labels) # draw segmentation masks segms = None if segm_result is not None and len(labels) > 0: # non empty segms = mmcv.concat_list(segm_result) if isinstance(segms[0], torch.Tensor): segms = torch.stack(segms, dim=0).detach().cpu().numpy() else: segms = np.stack(segms, axis=0) # if out_file specified, do not show image in window if out_file is not None: show = False # draw bounding boxes img = imshow_det_bboxes( img, bboxes, labels, segms, class_names=self.CLASSES, score_thr=score_thr, bbox_color=bbox_color, text_color=text_color, mask_color=mask_color, thickness=thickness, font_size=font_size, win_name=win_name, show=show, wait_time=wait_time, out_file=out_file) if not (show or out_file): return img def onnx_export(self, img, img_metas): raise NotImplementedError(f'{self.__class__.__name__} does ' f'not support ONNX EXPORT') ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/cascade_rcnn.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .two_stage import TwoStageDetector @DETECTORS.register_module() class CascadeRCNN(TwoStageDetector): r"""Implementation of `Cascade R-CNN: Delving into High Quality Object Detection `_""" def __init__(self, backbone, neck=None, rpn_head=None, roi_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(CascadeRCNN, self).__init__( backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg) def show_result(self, data, result, **kwargs): """Show prediction results of the detector. Args: data (str or np.ndarray): Image filename or loaded image. result (Tensor or tuple): The results to draw over `img` bbox_result or (bbox_result, segm_result). Returns: np.ndarray: The image with bboxes drawn on it. """ if self.with_mask: ms_bbox_result, ms_segm_result = result if isinstance(ms_bbox_result, dict): result = (ms_bbox_result['ensemble'], ms_segm_result['ensemble']) else: if isinstance(result, dict): result = result['ensemble'] return super(CascadeRCNN, self).show_result(data, result, **kwargs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/centernet.py ================================================ # Copyright (c) OpenMMLab. 
All rights reserved. import torch from mmdet.core import bbox2result from mmdet.models.builder import DETECTORS from ...core.utils import flip_tensor from .single_stage import SingleStageDetector @DETECTORS.register_module() class CenterNet(SingleStageDetector): """Implementation of CenterNet (Objects as Points).""" def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(CenterNet, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) def merge_aug_results(self, aug_results, with_nms): """Merge augmented detection bboxes and score. Args: aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each image. with_nms (bool): If True, do nms before return boxes. Returns: tuple: (out_bboxes, out_labels) """ recovered_bboxes, aug_labels = [], [] for single_result in aug_results: recovered_bboxes.append(single_result[0][0]) aug_labels.append(single_result[0][1]) bboxes = torch.cat(recovered_bboxes, dim=0).contiguous() labels = torch.cat(aug_labels).contiguous() if with_nms: out_bboxes, out_labels = self.bbox_head._bboxes_nms( bboxes, labels, self.bbox_head.test_cfg) else: out_bboxes, out_labels = bboxes, labels return out_bboxes, out_labels def aug_test(self, imgs, img_metas, rescale=True): """Augment testing of CenterNet. Aug test must have flipped image pair, and unlike CornerNet, it will perform an averaging operation on the feature map instead of detecting bbox. Args: imgs (list[Tensor]): Augmented images. img_metas (list[list[dict]]): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If True, return boxes in original image space. Default: True. Note: ``imgs`` must include flipped image pairs. Returns: list[list[np.ndarray]]: BBox results of each image and classes. The outer list corresponds to each image. The inner list corresponds to each class. """ img_inds = list(range(len(imgs))) assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], ( 'aug test must have flipped image pair') aug_results = [] for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]): flip_direction = img_metas[flip_ind][0]['flip_direction'] img_pair = torch.cat([imgs[ind], imgs[flip_ind]]) x = self.extract_feat(img_pair) center_heatmap_preds, wh_preds, offset_preds = self.bbox_head(x) assert len(center_heatmap_preds) == len(wh_preds) == len( offset_preds) == 1 # Feature map averaging center_heatmap_preds[0] = ( center_heatmap_preds[0][0:1] + flip_tensor(center_heatmap_preds[0][1:2], flip_direction)) / 2 wh_preds[0] = (wh_preds[0][0:1] + flip_tensor(wh_preds[0][1:2], flip_direction)) / 2 bbox_list = self.bbox_head.get_bboxes( center_heatmap_preds, wh_preds, [offset_preds[0][0:1]], img_metas[ind], rescale=rescale, with_nms=False) aug_results.append(bbox_list) nms_cfg = self.bbox_head.test_cfg.get('nms_cfg', None) if nms_cfg is None: with_nms = False else: with_nms = True bbox_list = [self.merge_aug_results(aug_results, with_nms)] bbox_results = [ bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) for det_bboxes, det_labels in bbox_list ] return bbox_results ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/cornernet.py ================================================ # Copyright (c) OpenMMLab. All rights reserved.
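# NOTE: Like ``CenterNet.aug_test`` above, ``CornerNet.aug_test`` below
# consumes the augmented images pairwise: even indices hold the original of
# each pair and odd indices its flipped copy, with matching ``flip`` /
# ``flip_direction`` entries in ``img_metas``. A hedged sketch of building
# such a pair by hand (a test pipeline with ``RandomFlip`` would normally
# produce this):
#
#     import torch
#
#     img = torch.randn(1, 3, 511, 511)       # original image
#     img_flip = torch.flip(img, dims=[3])    # horizontal flip along width
#     imgs = [img, img_flip]                  # consumed as (orig, flip) pairs
#     # img_metas[0][0]['flip'] -> False; img_metas[1][0]['flip'] -> True,
#     # with img_metas[1][0]['flip_direction'] == 'horizontal'.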
import torch from mmdet.core import bbox2result, bbox_mapping_back from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class CornerNet(SingleStageDetector): """CornerNet. This detector is the implementation of the paper `CornerNet: Detecting Objects as Paired Keypoints `_. """ def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(CornerNet, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) def merge_aug_results(self, aug_results, img_metas): """Merge augmented detection bboxes and score. Args: aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each image. img_metas (list[list[dict]]): Meta information of each image, e.g., image size, scaling factor, etc. Returns: tuple: (bboxes, labels) """ recovered_bboxes, aug_labels = [], [] for bboxes_labels, img_info in zip(aug_results, img_metas): img_shape = img_info[0]['img_shape'] # using shape before padding scale_factor = img_info[0]['scale_factor'] flip = img_info[0]['flip'] bboxes, labels = bboxes_labels bboxes, scores = bboxes[:, :4], bboxes[:, -1:] bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip) recovered_bboxes.append(torch.cat([bboxes, scores], dim=-1)) aug_labels.append(labels) bboxes = torch.cat(recovered_bboxes, dim=0) labels = torch.cat(aug_labels) if bboxes.shape[0] > 0: out_bboxes, out_labels = self.bbox_head._bboxes_nms( bboxes, labels, self.bbox_head.test_cfg) else: out_bboxes, out_labels = bboxes, labels return out_bboxes, out_labels def aug_test(self, imgs, img_metas, rescale=False): """Augment testing of CornerNet. Args: imgs (list[Tensor]): Augmented images. img_metas (list[list[dict]]): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If True, return boxes in original image space. Default: False. Note: ``imgs`` must include flipped image pairs. Returns: list[list[np.ndarray]]: BBox results of each image and classes. The outer list corresponds to each image. The inner list corresponds to each class. """ img_inds = list(range(len(imgs))) assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], ( 'aug test must have flipped image pair') aug_results = [] for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]): img_pair = torch.cat([imgs[ind], imgs[flip_ind]]) x = self.extract_feat(img_pair) outs = self.bbox_head(x) bbox_list = self.bbox_head.get_bboxes( *outs, [img_metas[ind], img_metas[flip_ind]], False, False) aug_results.append(bbox_list[0]) aug_results.append(bbox_list[1]) bboxes, labels = self.merge_aug_results(aug_results, img_metas) bbox_results = bbox2result(bboxes, labels, self.bbox_head.num_classes) return [bbox_results] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/ddod.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class DDOD(SingleStageDetector): """Implementation of `DDOD `_.""" def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(DDOD, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/deformable_detr.py ================================================ # Copyright (c) OpenMMLab.
All rights reserved. from ..builder import DETECTORS from .detr import DETR @DETECTORS.register_module() class DeformableDETR(DETR): def __init__(self, *args, **kwargs): super(DETR, self).__init__(*args, **kwargs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/detr.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings import torch from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class DETR(SingleStageDetector): r"""Implementation of `DETR: End-to-End Object Detection with Transformers `_""" def __init__(self, backbone, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(DETR, self).__init__(backbone, None, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) # over-write `forward_dummy` because: # the forward of bbox_head requires img_metas def forward_dummy(self, img): """Used for computing network flops. See `mmdetection/tools/analysis_tools/get_flops.py` """ warnings.warn('Warning! MultiheadAttention in DETR does not ' 'support flops computation! Do not use the ' 'results in your papers!') batch_size, _, height, width = img.shape dummy_img_metas = [ dict( batch_input_shape=(height, width), img_shape=(height, width, 3)) for _ in range(batch_size) ] x = self.extract_feat(img) outs = self.bbox_head(x, dummy_img_metas) return outs # over-write `onnx_export` because: # (1) the forward of bbox_head requires img_metas # (2) the different behavior (e.g. construction of `masks`) between # torch and ONNX model, during the forward of bbox_head def onnx_export(self, img, img_metas): """Test function for exporting to ONNX, without test time augmentation. Args: img (torch.Tensor): input images. img_metas (list[dict]): List of image information. Returns: tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] and class labels of shape [N, num_det]. """ x = self.extract_feat(img) # forward of this head requires img_metas outs = self.bbox_head.forward_onnx(x, img_metas) # get shape as tensor img_shape = torch._shape_as_tensor(img)[2:] img_metas[0]['img_shape_for_onnx'] = img_shape det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas) return det_bboxes, det_labels ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/fast_rcnn.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .two_stage import TwoStageDetector @DETECTORS.register_module() class FastRCNN(TwoStageDetector): """Implementation of `Fast R-CNN `_""" def __init__(self, backbone, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None): super(FastRCNN, self).__init__( backbone=backbone, neck=neck, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg) def forward_test(self, imgs, img_metas, proposals, **kwargs): """ Args: imgs (List[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains all images in the batch. img_metas (List[List[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. proposals (List[List[Tensor]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. The Tensor should have a shape Px4, where P is the number of proposals. 
""" for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: if not isinstance(var, list): raise TypeError(f'{name} must be a list, but got {type(var)}') num_augs = len(imgs) if num_augs != len(img_metas): raise ValueError(f'num of augmentations ({len(imgs)}) ' f'!= num of image meta ({len(img_metas)})') if num_augs == 1: return self.simple_test(imgs[0], img_metas[0], proposals[0], **kwargs) else: # TODO: support test-time augmentation assert NotImplementedError ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/faster_rcnn.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .two_stage import TwoStageDetector @DETECTORS.register_module() class FasterRCNN(TwoStageDetector): """Implementation of `Faster R-CNN `_""" def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None): super(FasterRCNN, self).__init__( backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/fcos.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class FCOS(SingleStageDetector): """Implementation of `FCOS `_""" def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/fovea.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class FOVEA(SingleStageDetector): """Implementation of `FoveaBox `_""" def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(FOVEA, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/fsaf.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class FSAF(SingleStageDetector): """Implementation of `FSAF `_""" def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/gfl.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class GFL(SingleStageDetector): def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(GFL, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/grid_rcnn.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .two_stage import TwoStageDetector @DETECTORS.register_module() class GridRCNN(TwoStageDetector): """Grid R-CNN. This detector is the implementation of: - Grid R-CNN (https://arxiv.org/abs/1811.12030) - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688) """ def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None): super(GridRCNN, self).__init__( backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/htc.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .cascade_rcnn import CascadeRCNN @DETECTORS.register_module() class HybridTaskCascade(CascadeRCNN): """Implementation of `HTC `_""" def __init__(self, **kwargs): super(HybridTaskCascade, self).__init__(**kwargs) @property def with_semantic(self): """bool: whether the detector has a semantic head""" return self.roi_head.with_semantic ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/kd_one_stage.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from pathlib import Path import mmcv import torch from mmcv.runner import load_checkpoint from .. import build_detector from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class KnowledgeDistillationSingleStageDetector(SingleStageDetector): r"""Implementation of `Distilling the Knowledge in a Neural Network. `_. Args: teacher_config (str | dict): Config file path or the config object of teacher model. teacher_ckpt (str, optional): Checkpoint path of teacher model. If left as None, the model will not load any weights. """ def __init__(self, backbone, neck, bbox_head, teacher_config, teacher_ckpt=None, eval_teacher=True, train_cfg=None, test_cfg=None, pretrained=None): super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained) self.eval_teacher = eval_teacher # Build teacher model if isinstance(teacher_config, (str, Path)): teacher_config = mmcv.Config.fromfile(teacher_config) self.teacher_model = build_detector(teacher_config['model']) if teacher_ckpt is not None: load_checkpoint( self.teacher_model, teacher_ckpt, map_location='cpu') def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None): """ Args: img (Tensor): Input images of shape (N, C, H, W). Typically these should be mean centered and std scaled. img_metas (list[dict]): A List of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. 
For details on the values of these keys see :class:`mmdet.datasets.pipelines.Collect`. gt_bboxes (list[Tensor]): Each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): Class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ x = self.extract_feat(img) with torch.no_grad(): teacher_x = self.teacher_model.extract_feat(img) out_teacher = self.teacher_model.bbox_head(teacher_x) losses = self.bbox_head.forward_train(x, out_teacher, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore) return losses def cuda(self, device=None): """Since teacher_model is registered as a plain object, it is necessary to put the teacher model to cuda when calling cuda function.""" self.teacher_model.cuda(device=device) return super().cuda(device=device) def train(self, mode=True): """Set the same train mode for teacher and student model.""" if self.eval_teacher: self.teacher_model.train(False) else: self.teacher_model.train(mode) super().train(mode) def __setattr__(self, name, value): """Set attribute, i.e. self.name = value This reloading prevent the teacher model from being registered as a nn.Module. The teacher module is registered as a plain object, so that the teacher parameters will not show up when calling ``self.parameters``, ``self.modules``, ``self.children`` methods. """ if name == 'teacher_model': object.__setattr__(self, name, value) else: super().__setattr__(name, value) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/lad.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.runner import load_checkpoint from ..builder import DETECTORS, build_backbone, build_head, build_neck from .kd_one_stage import KnowledgeDistillationSingleStageDetector @DETECTORS.register_module() class LAD(KnowledgeDistillationSingleStageDetector): """Implementation of `LAD `_.""" def __init__(self, backbone, neck, bbox_head, teacher_backbone, teacher_neck, teacher_bbox_head, teacher_ckpt, eval_teacher=True, train_cfg=None, test_cfg=None, pretrained=None): super(KnowledgeDistillationSingleStageDetector, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained) self.eval_teacher = eval_teacher self.teacher_model = nn.Module() self.teacher_model.backbone = build_backbone(teacher_backbone) if teacher_neck is not None: self.teacher_model.neck = build_neck(teacher_neck) teacher_bbox_head.update(train_cfg=train_cfg) teacher_bbox_head.update(test_cfg=test_cfg) self.teacher_model.bbox_head = build_head(teacher_bbox_head) if teacher_ckpt is not None: load_checkpoint( self.teacher_model, teacher_ckpt, map_location='cpu') @property def with_teacher_neck(self): """bool: whether the detector has a teacher_neck""" return hasattr(self.teacher_model, 'neck') and \ self.teacher_model.neck is not None def extract_teacher_feat(self, img): """Directly extract teacher features from the backbone+neck.""" x = self.teacher_model.backbone(img) if self.with_teacher_neck: x = self.teacher_model.neck(x) return x def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None): """ Args: img (Tensor): Input images of shape (N, C, H, W). Typically these should be mean centered and std scaled. 
img_metas (list[dict]): A List of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see :class:`mmdet.datasets.pipelines.Collect`. gt_bboxes (list[Tensor]): Each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): Class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ # get label assignment from the teacher with torch.no_grad(): x_teacher = self.extract_teacher_feat(img) outs_teacher = self.teacher_model.bbox_head(x_teacher) label_assignment_results = \ self.teacher_model.bbox_head.get_label_assignment( *outs_teacher, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore) # the student use the label assignment from the teacher to learn x = self.extract_feat(img) losses = self.bbox_head.forward_train(x, label_assignment_results, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore) return losses ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/mask2former.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .maskformer import MaskFormer @DETECTORS.register_module() class Mask2Former(MaskFormer): r"""Implementation of `Masked-attention Mask Transformer for Universal Image Segmentation `_.""" def __init__(self, backbone, neck=None, panoptic_head=None, panoptic_fusion_head=None, train_cfg=None, test_cfg=None, init_cfg=None): super().__init__( backbone, neck=neck, panoptic_head=panoptic_head, panoptic_fusion_head=panoptic_fusion_head, train_cfg=train_cfg, test_cfg=test_cfg, init_cfg=init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/mask_rcnn.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .two_stage import TwoStageDetector @DETECTORS.register_module() class MaskRCNN(TwoStageDetector): """Implementation of `Mask R-CNN `_""" def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None): super(MaskRCNN, self).__init__( backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/mask_scoring_rcnn.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .two_stage import TwoStageDetector @DETECTORS.register_module() class MaskScoringRCNN(TwoStageDetector): """Mask Scoring RCNN. https://arxiv.org/abs/1903.00241 """ def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None): super(MaskScoringRCNN, self).__init__( backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/maskformer.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
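# NOTE: The distillation detectors above (``kd_one_stage.py``, ``lad.py``)
# keep the teacher out of the student's parameter tree: KD overrides
# ``__setattr__`` so ``teacher_model`` stays a plain attribute, and LAD nests
# the teacher parts in a bare ``nn.Module`` container, with teacher forwards
# run under ``torch.no_grad()``. A hedged, self-contained sketch of the same
# hiding trick (``Student`` is illustrative only):
#
#     import torch.nn as nn
#
#     class Student(nn.Module):
#         def __init__(self, teacher):
#             super().__init__()
#             # bypass nn.Module registration -> teacher not in self._modules
#             object.__setattr__(self, 'teacher', teacher)
#
#     s = Student(nn.Linear(4, 4))
#     assert len(list(s.parameters())) == 0  # teacher weights stay hidden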
import copy import mmcv import numpy as np from mmdet.core import INSTANCE_OFFSET, bbox2result from mmdet.core.visualization import imshow_det_bboxes from ..builder import DETECTORS, build_backbone, build_head, build_neck from .single_stage import SingleStageDetector @DETECTORS.register_module() class MaskFormer(SingleStageDetector): r"""Implementation of `Per-Pixel Classification is NOT All You Need for Semantic Segmentation `_.""" def __init__(self, backbone, neck=None, panoptic_head=None, panoptic_fusion_head=None, train_cfg=None, test_cfg=None, init_cfg=None): super(SingleStageDetector, self).__init__(init_cfg=init_cfg) self.backbone = build_backbone(backbone) if neck is not None: self.neck = build_neck(neck) panoptic_head_ = copy.deepcopy(panoptic_head) panoptic_head_.update(train_cfg=train_cfg) panoptic_head_.update(test_cfg=test_cfg) self.panoptic_head = build_head(panoptic_head_) panoptic_fusion_head_ = copy.deepcopy(panoptic_fusion_head) panoptic_fusion_head_.update(test_cfg=test_cfg) self.panoptic_fusion_head = build_head(panoptic_fusion_head_) self.num_things_classes = self.panoptic_head.num_things_classes self.num_stuff_classes = self.panoptic_head.num_stuff_classes self.num_classes = self.panoptic_head.num_classes self.train_cfg = train_cfg self.test_cfg = test_cfg # BaseDetector.show_result default for instance segmentation if self.num_stuff_classes > 0: self.show_result = self._show_pan_result def forward_dummy(self, img, img_metas): """Used for computing network flops. See `mmdetection/tools/analysis_tools/get_flops.py` Args: img (Tensor): of shape (N, C, H, W) encoding input images. Typically these should be mean centered and std scaled. img_metas (list[Dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. """ super(SingleStageDetector, self).forward_train(img, img_metas) x = self.extract_feat(img) outs = self.panoptic_head(x, img_metas) return outs def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_masks, gt_semantic_seg=None, gt_bboxes_ignore=None, **kargs): """ Args: img (Tensor): of shape (N, C, H, W) encoding input images. Typically these should be mean centered and std scaled. img_metas (list[Dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box. gt_masks (list[BitmapMasks]): true segmentation masks for each box used if the architecture supports a segmentation task. gt_semantic_seg (list[tensor]): semantic segmentation mask for images for panoptic segmentation. Defaults to None for instance segmentation. gt_bboxes_ignore (list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Defaults to None. 
Returns: dict[str, Tensor]: a dictionary of loss components """ # add batch_input_shape in img_metas super(SingleStageDetector, self).forward_train(img, img_metas) x = self.extract_feat(img) losses = self.panoptic_head.forward_train(x, img_metas, gt_bboxes, gt_labels, gt_masks, gt_semantic_seg, gt_bboxes_ignore) return losses def simple_test(self, imgs, img_metas, **kwargs): """Test without augmentation. Args: imgs (Tensor): A batch of images. img_metas (list[dict]): List of image information. Returns: list[dict[str, np.array | tuple[list]] | tuple[list]]: Semantic segmentation results and panoptic segmentation \ results of each image for panoptic segmentation, or formatted \ bbox and mask results of each image for instance segmentation. .. code-block:: none [ # panoptic segmentation { 'pan_results': np.array, # shape = [h, w] 'ins_results': tuple[list], # semantic segmentation results are not supported yet 'sem_results': np.array }, ... ] or .. code-block:: none [ # instance segmentation ( bboxes, # list[np.array] masks # list[list[np.array]] ), ... ] """ feats = self.extract_feat(imgs) mask_cls_results, mask_pred_results = self.panoptic_head.simple_test( feats, img_metas, **kwargs) results = self.panoptic_fusion_head.simple_test( mask_cls_results, mask_pred_results, img_metas, **kwargs) for i in range(len(results)): if 'pan_results' in results[i]: results[i]['pan_results'] = results[i]['pan_results'].detach( ).cpu().numpy() if 'ins_results' in results[i]: labels_per_image, bboxes, mask_pred_binary = results[i][ 'ins_results'] bbox_results = bbox2result(bboxes, labels_per_image, self.num_things_classes) mask_results = [[] for _ in range(self.num_things_classes)] for j, label in enumerate(labels_per_image): mask = mask_pred_binary[j].detach().cpu().numpy() mask_results[label].append(mask) results[i]['ins_results'] = bbox_results, mask_results assert 'sem_results' not in results[i], 'segmantic segmentation '\ 'results are not supported yet.' if self.num_stuff_classes == 0: results = [res['ins_results'] for res in results] return results def aug_test(self, imgs, img_metas, **kwargs): raise NotImplementedError def onnx_export(self, img, img_metas): raise NotImplementedError def _show_pan_result(self, img, result, score_thr=0.3, bbox_color=(72, 101, 241), text_color=(72, 101, 241), mask_color=None, thickness=2, font_size=13, win_name='', show=False, wait_time=0, out_file=None): """Draw `panoptic result` over `img`. Args: img (str or Tensor): The image to be displayed. result (dict): The results. score_thr (float, optional): Minimum score of bboxes to be shown. Default: 0.3. bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. The tuple of color should be in BGR order. Default: 'green'. text_color (str or tuple(int) or :obj:`Color`):Color of texts. The tuple of color should be in BGR order. Default: 'green'. mask_color (None or str or tuple(int) or :obj:`Color`): Color of masks. The tuple of color should be in BGR order. Default: None. thickness (int): Thickness of lines. Default: 2. font_size (int): Font size of texts. Default: 13. win_name (str): The window name. Default: ''. wait_time (float): Value of waitKey param. Default: 0. show (bool): Whether to show the image. Default: False. out_file (str or None): The filename to write the image. Default: None. Returns: img (Tensor): Only if not `show` or `out_file`. 
""" img = mmcv.imread(img) img = img.copy() pan_results = result['pan_results'] # keep objects ahead ids = np.unique(pan_results)[::-1] legal_indices = ids != self.num_classes # for VOID label ids = ids[legal_indices] labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64) segms = (pan_results[None] == ids[:, None, None]) # if out_file specified, do not show image in window if out_file is not None: show = False # draw bounding boxes img = imshow_det_bboxes( img, segms=segms, labels=labels, class_names=self.CLASSES, bbox_color=bbox_color, text_color=text_color, mask_color=mask_color, thickness=thickness, font_size=font_size, win_name=win_name, show=show, wait_time=wait_time, out_file=out_file) if not (show or out_file): return img ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/nasfcos.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class NASFCOS(SingleStageDetector): """NAS-FCOS: Fast Neural Architecture Search for Object Detection. https://arxiv.org/abs/1906.0442 """ def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/paa.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class PAA(SingleStageDetector): """Implementation of `PAA `_.""" def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/panoptic_fpn.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor @DETECTORS.register_module() class PanopticFPN(TwoStagePanopticSegmentor): r"""Implementation of `Panoptic feature pyramid networks `_""" def __init__( self, backbone, neck=None, rpn_head=None, roi_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None, # for panoptic segmentation semantic_head=None, panoptic_fusion_head=None): super(PanopticFPN, self).__init__( backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg, semantic_head=semantic_head, panoptic_fusion_head=panoptic_fusion_head) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/panoptic_two_stage_segmentor.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
import mmcv import numpy as np import torch from mmdet.core import INSTANCE_OFFSET, bbox2roi, multiclass_nms from mmdet.core.visualization import imshow_det_bboxes from ..builder import DETECTORS, build_head from ..roi_heads.mask_heads.fcn_mask_head import _do_paste_mask from .two_stage import TwoStageDetector @DETECTORS.register_module() class TwoStagePanopticSegmentor(TwoStageDetector): """Base class of two-stage panoptic segmentors. In addition to the components in TwoStageDetector, a panoptic segmentor has an extra semantic_head and a panoptic_fusion_head. """ def __init__( self, backbone, neck=None, rpn_head=None, roi_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None, # for panoptic segmentation semantic_head=None, panoptic_fusion_head=None): super(TwoStagePanopticSegmentor, self).__init__(backbone, neck, rpn_head, roi_head, train_cfg, test_cfg, pretrained, init_cfg) if semantic_head is not None: self.semantic_head = build_head(semantic_head) if panoptic_fusion_head is not None: panoptic_cfg = test_cfg.panoptic if test_cfg is not None else None panoptic_fusion_head_ = panoptic_fusion_head.deepcopy() panoptic_fusion_head_.update(test_cfg=panoptic_cfg) self.panoptic_fusion_head = build_head(panoptic_fusion_head_) self.num_things_classes = self.panoptic_fusion_head.\ num_things_classes self.num_stuff_classes = self.panoptic_fusion_head.\ num_stuff_classes self.num_classes = self.panoptic_fusion_head.num_classes @property def with_semantic_head(self): return hasattr(self, 'semantic_head') and self.semantic_head is not None @property def with_panoptic_fusion_head(self): return hasattr(self, 'panoptic_fusion_head') and \ self.panoptic_fusion_head is not None def forward_dummy(self, img): """Used for computing network flops. See `mmdetection/tools/analysis_tools/get_flops.py` """ raise NotImplementedError( f'`forward_dummy` is not implemented in {self.__class__.__name__}') def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, gt_semantic_seg=None, proposals=None, **kwargs): x = self.extract_feat(img) losses = dict() # RPN forward and loss if self.with_rpn: proposal_cfg = self.train_cfg.get('rpn_proposal', self.test_cfg.rpn) rpn_losses, proposal_list = self.rpn_head.forward_train( x, img_metas, gt_bboxes, gt_labels=None, gt_bboxes_ignore=gt_bboxes_ignore, proposal_cfg=proposal_cfg) losses.update(rpn_losses) else: proposal_list = proposals roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore, gt_masks, **kwargs) losses.update(roi_losses) semantic_loss = self.semantic_head.forward_train(x, gt_semantic_seg) losses.update(semantic_loss) return losses def simple_test_mask(self, x, img_metas, det_bboxes, det_labels, rescale=False): """Simple test for mask head without augmentation.""" img_shapes = tuple(meta['ori_shape'] for meta in img_metas) if rescale else tuple( meta['pad_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): masks = [] for img_shape in img_shapes: out_shape = (0, self.roi_head.bbox_head.num_classes) \ + img_shape[:2] masks.append(det_bboxes[0].new_zeros(out_shape)) mask_pred = det_bboxes[0].new_zeros((0, 80, 28, 28)) mask_results = dict( masks=masks, mask_pred=mask_pred, mask_feats=None) return mask_results _bboxes = [det_bboxes[i][:, :4] for i in range(len(det_bboxes))] if rescale: if not isinstance(scale_factors[0], float): scale_factors = [
det_bboxes[0].new_tensor(scale_factor) for scale_factor in scale_factors ] _bboxes = [ _bboxes[i] * scale_factors[i] for i in range(len(_bboxes)) ] mask_rois = bbox2roi(_bboxes) mask_results = self.roi_head._mask_forward(x, mask_rois) mask_pred = mask_results['mask_pred'] # split batch mask prediction back to each image num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes] mask_preds = mask_pred.split(num_mask_roi_per_img, 0) # resize the mask_preds to (K, H, W) masks = [] for i in range(len(_bboxes)): det_bbox = det_bboxes[i][:, :4] det_label = det_labels[i] mask_pred = mask_preds[i].sigmoid() box_inds = torch.arange(mask_pred.shape[0]) mask_pred = mask_pred[box_inds, det_label][:, None] img_h, img_w, _ = img_shapes[i] mask_pred, _ = _do_paste_mask( mask_pred, det_bbox, img_h, img_w, skip_empty=False) masks.append(mask_pred) mask_results['masks'] = masks return mask_results def simple_test(self, img, img_metas, proposals=None, rescale=False): """Test without Augmentation.""" x = self.extract_feat(img) if proposals is None: proposal_list = self.rpn_head.simple_test_rpn(x, img_metas) else: proposal_list = proposals bboxes, scores = self.roi_head.simple_test_bboxes( x, img_metas, proposal_list, None, rescale=rescale) pan_cfg = self.test_cfg.panoptic # class-wise predictions det_bboxes = [] det_labels = [] for bboxe, score in zip(bboxes, scores): det_bbox, det_label = multiclass_nms(bboxe, score, pan_cfg.score_thr, pan_cfg.nms, pan_cfg.max_per_img) det_bboxes.append(det_bbox) det_labels.append(det_label) mask_results = self.simple_test_mask( x, img_metas, det_bboxes, det_labels, rescale=rescale) masks = mask_results['masks'] seg_preds = self.semantic_head.simple_test(x, img_metas, rescale) results = [] for i in range(len(det_bboxes)): pan_results = self.panoptic_fusion_head.simple_test( det_bboxes[i], det_labels[i], masks[i], seg_preds[i]) pan_results = pan_results.int().detach().cpu().numpy() result = dict(pan_results=pan_results) results.append(result) return results def show_result(self, img, result, score_thr=0.3, bbox_color=(72, 101, 241), text_color=(72, 101, 241), mask_color=None, thickness=2, font_size=13, win_name='', show=False, wait_time=0, out_file=None): """Draw `result` over `img`. Args: img (str or Tensor): The image to be displayed. result (dict): The results. score_thr (float, optional): Minimum score of bboxes to be shown. Default: 0.3. bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. The tuple of color should be in BGR order. Default: 'green'. text_color (str or tuple(int) or :obj:`Color`):Color of texts. The tuple of color should be in BGR order. Default: 'green'. mask_color (None or str or tuple(int) or :obj:`Color`): Color of masks. The tuple of color should be in BGR order. Default: None. thickness (int): Thickness of lines. Default: 2. font_size (int): Font size of texts. Default: 13. win_name (str): The window name. Default: ''. wait_time (float): Value of waitKey param. Default: 0. show (bool): Whether to show the image. Default: False. out_file (str or None): The filename to write the image. Default: None. Returns: img (Tensor): Only if not `show` or `out_file`. 
""" img = mmcv.imread(img) img = img.copy() pan_results = result['pan_results'] # keep objects ahead ids = np.unique(pan_results)[::-1] legal_indices = ids != self.num_classes # for VOID label ids = ids[legal_indices] labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64) segms = (pan_results[None] == ids[:, None, None]) # if out_file specified, do not show image in window if out_file is not None: show = False # draw bounding boxes img = imshow_det_bboxes( img, segms=segms, labels=labels, class_names=self.CLASSES, bbox_color=bbox_color, text_color=text_color, mask_color=mask_color, thickness=thickness, font_size=font_size, win_name=win_name, show=show, wait_time=wait_time, out_file=out_file) if not (show or out_file): return img ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/point_rend.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .two_stage import TwoStageDetector @DETECTORS.register_module() class PointRend(TwoStageDetector): """PointRend: Image Segmentation as Rendering This detector is the implementation of `PointRend `_. """ def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None): super(PointRend, self).__init__( backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/queryinst.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .sparse_rcnn import SparseRCNN @DETECTORS.register_module() class QueryInst(SparseRCNN): r"""Implementation of `Instances as Queries `_""" def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None): super(QueryInst, self).__init__( backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/reppoints_detector.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class RepPointsDetector(SingleStageDetector): """RepPoints: Point Set Representation for Object Detection. This detector is the implementation of: - RepPoints detector (https://arxiv.org/pdf/1904.11490) """ def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(RepPointsDetector, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/retinanet.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class RetinaNet(SingleStageDetector): """Implementation of `RetinaNet `_""" def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/rpn.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings from inspect import signature import mmcv import torch from mmcv.image import tensor2imgs from mmdet.core import bbox_mapping from ..builder import DETECTORS, build_backbone, build_head, build_neck from .base import BaseDetector @DETECTORS.register_module() class RPN(BaseDetector): """Implementation of Region Proposal Network.""" def __init__(self, backbone, neck, rpn_head, train_cfg, test_cfg, pretrained=None, init_cfg=None): super(RPN, self).__init__(init_cfg) if pretrained: warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') backbone.pretrained = pretrained self.backbone = build_backbone(backbone) self.neck = build_neck(neck) if neck is not None else None rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None rpn_head.update(train_cfg=rpn_train_cfg) rpn_head.update(test_cfg=test_cfg.rpn) self.rpn_head = build_head(rpn_head) self.train_cfg = train_cfg self.test_cfg = test_cfg def extract_feat(self, img): """Extract features. Args: img (torch.Tensor): Image tensor with shape (n, c, h ,w). Returns: list[torch.Tensor]: Multi-level features that may have different resolutions. """ x = self.backbone(img) if self.with_neck: x = self.neck(x) return x def forward_dummy(self, img): """Dummy forward function.""" x = self.extract_feat(img) rpn_outs = self.rpn_head(x) return rpn_outs def forward_train(self, img, img_metas, gt_bboxes=None, gt_bboxes_ignore=None): """ Args: img (Tensor): Input images of shape (N, C, H, W). Typically these should be mean centered and std scaled. img_metas (list[dict]): A List of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see :class:`mmdet.datasets.pipelines.Collect`. gt_bboxes (list[Tensor]): Each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_bboxes_ignore (None | list[Tensor]): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ if (isinstance(self.train_cfg.rpn, dict) and self.train_cfg.rpn.get('debug', False)): self.rpn_head.debug_imgs = tensor2imgs(img) x = self.extract_feat(img) losses = self.rpn_head.forward_train(x, img_metas, gt_bboxes, None, gt_bboxes_ignore) return losses def simple_test(self, img, img_metas, rescale=False): """Test function without test time augmentation. Args: imgs (list[torch.Tensor]): List of multiple images img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. 
Returns: list[np.ndarray]: proposals """ x = self.extract_feat(img) # get origin input shape to onnx dynamic input shape if torch.onnx.is_in_onnx_export(): img_shape = torch._shape_as_tensor(img)[2:] img_metas[0]['img_shape_for_onnx'] = img_shape proposal_list = self.rpn_head.simple_test_rpn(x, img_metas) if rescale: for proposals, meta in zip(proposal_list, img_metas): proposals[:, :4] /= proposals.new_tensor(meta['scale_factor']) if torch.onnx.is_in_onnx_export(): return proposal_list return [proposal.cpu().numpy() for proposal in proposal_list] def aug_test(self, imgs, img_metas, rescale=False): """Test function with test time augmentation. Args: imgs (list[torch.Tensor]): List of multiple images img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[np.ndarray]: proposals """ proposal_list = self.rpn_head.aug_test_rpn( self.extract_feats(imgs), img_metas) if not rescale: for proposals, img_meta in zip(proposal_list, img_metas[0]): img_shape = img_meta['img_shape'] scale_factor = img_meta['scale_factor'] flip = img_meta['flip'] flip_direction = img_meta['flip_direction'] proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape, scale_factor, flip, flip_direction) return [proposal.cpu().numpy() for proposal in proposal_list] def show_result(self, data, result, top_k=20, **kwargs): """Show RPN proposals on the image. Args: data (str or np.ndarray): Image filename or loaded image. result (Tensor or tuple): The results to draw over `img` bbox_result or (bbox_result, segm_result). top_k (int): Plot the first k bboxes only if set positive. Default: 20 Returns: np.ndarray: The image with bboxes drawn on it. """ if kwargs is not None: kwargs['colors'] = 'green' sig = signature(mmcv.imshow_bboxes) for k in list(kwargs.keys()): if k not in sig.parameters: kwargs.pop(k) mmcv.imshow_bboxes(data, result, top_k=top_k, **kwargs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/scnet.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .cascade_rcnn import CascadeRCNN @DETECTORS.register_module() class SCNet(CascadeRCNN): """Implementation of `SCNet `_""" def __init__(self, **kwargs): super(SCNet, self).__init__(**kwargs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/single_stage.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings import torch from mmdet.core import bbox2result from ..builder import DETECTORS, build_backbone, build_head, build_neck from .base import BaseDetector @DETECTORS.register_module() class SingleStageDetector(BaseDetector): """Base class for single-stage detectors. Single-stage detectors directly and densely predict bounding boxes on the output features of the backbone+neck. 
""" def __init__(self, backbone, neck=None, bbox_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(SingleStageDetector, self).__init__(init_cfg) if pretrained: warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') backbone.pretrained = pretrained self.backbone = build_backbone(backbone) if neck is not None: self.neck = build_neck(neck) bbox_head.update(train_cfg=train_cfg) bbox_head.update(test_cfg=test_cfg) self.bbox_head = build_head(bbox_head) self.train_cfg = train_cfg self.test_cfg = test_cfg def extract_feat(self, img): """Directly extract features from the backbone+neck.""" x = self.backbone(img) if self.with_neck: x = self.neck(x) return x def forward_dummy(self, img): """Used for computing network flops. See `mmdetection/tools/analysis_tools/get_flops.py` """ x = self.extract_feat(img) outs = self.bbox_head(x) return outs def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None): """ Args: img (Tensor): Input images of shape (N, C, H, W). Typically these should be mean centered and std scaled. img_metas (list[dict]): A List of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see :class:`mmdet.datasets.pipelines.Collect`. gt_bboxes (list[Tensor]): Each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): Class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ super(SingleStageDetector, self).forward_train(img, img_metas) x = self.extract_feat(img) losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore) return losses def simple_test(self, img, img_metas, rescale=False): """Test function without test-time augmentation. Args: img (torch.Tensor): Images with shape (N, C, H, W). img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[list[np.ndarray]]: BBox results of each image and classes. The outer list corresponds to each image. The inner list corresponds to each class. """ feat = self.extract_feat(img) results_list = self.bbox_head.simple_test( feat, img_metas, rescale=rescale) bbox_results = [ bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) for det_bboxes, det_labels in results_list ] return bbox_results def aug_test(self, imgs, img_metas, rescale=False): """Test function with test time augmentation. Args: imgs (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains all images in the batch. img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[list[np.ndarray]]: BBox results of each image and classes. The outer list corresponds to each image. The inner list corresponds to each class. 
""" assert hasattr(self.bbox_head, 'aug_test'), \ f'{self.bbox_head.__class__.__name__}' \ ' does not support test-time augmentation' feats = self.extract_feats(imgs) results_list = self.bbox_head.aug_test( feats, img_metas, rescale=rescale) bbox_results = [ bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) for det_bboxes, det_labels in results_list ] return bbox_results def onnx_export(self, img, img_metas, with_nms=True): """Test function without test time augmentation. Args: img (torch.Tensor): input images. img_metas (list[dict]): List of image information. Returns: tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] and class labels of shape [N, num_det]. """ x = self.extract_feat(img) outs = self.bbox_head(x) # get origin input shape to support onnx dynamic shape # get shape as tensor img_shape = torch._shape_as_tensor(img)[2:] img_metas[0]['img_shape_for_onnx'] = img_shape # get pad input shape to support onnx dynamic shape for exporting # `CornerNet` and `CentripetalNet`, which 'pad_shape' is used # for inference img_metas[0]['pad_shape_for_onnx'] = img_shape if len(outs) == 2: # add dummy score_factor outs = (*outs, None) # TODO Can we change to `get_bboxes` when `onnx_export` fail det_bboxes, det_labels = self.bbox_head.onnx_export( *outs, img_metas, with_nms=with_nms) return det_bboxes, det_labels ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/single_stage_instance_seg.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import copy import warnings import mmcv import numpy as np import torch from mmdet.core.visualization.image import imshow_det_bboxes from ..builder import DETECTORS, build_backbone, build_head, build_neck from .base import BaseDetector INF = 1e8 @DETECTORS.register_module() class SingleStageInstanceSegmentor(BaseDetector): """Base class for single-stage instance segmentors.""" def __init__(self, backbone, neck=None, bbox_head=None, mask_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): if pretrained: warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') backbone.pretrained = pretrained super(SingleStageInstanceSegmentor, self).__init__(init_cfg=init_cfg) self.backbone = build_backbone(backbone) if neck is not None: self.neck = build_neck(neck) else: self.neck = None if bbox_head is not None: bbox_head.update(train_cfg=copy.deepcopy(train_cfg)) bbox_head.update(test_cfg=copy.deepcopy(test_cfg)) self.bbox_head = build_head(bbox_head) else: self.bbox_head = None assert mask_head, f'`mask_head` must ' \ f'be implemented in {self.__class__.__name__}' mask_head.update(train_cfg=copy.deepcopy(train_cfg)) mask_head.update(test_cfg=copy.deepcopy(test_cfg)) self.mask_head = build_head(mask_head) self.train_cfg = train_cfg self.test_cfg = test_cfg def extract_feat(self, img): """Directly extract features from the backbone and neck.""" x = self.backbone(img) if self.with_neck: x = self.neck(x) return x def forward_dummy(self, img): """Used for computing network flops. See `mmdetection/tools/analysis_tools/get_flops.py` """ raise NotImplementedError( f'`forward_dummy` is not implemented in {self.__class__.__name__}') def forward_train(self, img, img_metas, gt_masks, gt_labels, gt_bboxes=None, gt_bboxes_ignore=None, **kwargs): """ Args: img (Tensor): Input images of shape (B, C, H, W). Typically these should be mean centered and std scaled. 
img_metas (list[dict]): A List of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see :class:`mmdet.datasets.pipelines.Collect`. gt_masks (list[:obj:`BitmapMasks`] | None) : The segmentation masks for each box. gt_labels (list[Tensor]): Class indices corresponding to each box gt_bboxes (list[Tensor]): Each item is the truth boxes of each image in [tl_x, tl_y, br_x, br_y] format. Default: None. gt_bboxes_ignore (list[Tensor] | None): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ gt_masks = [ gt_mask.to_tensor(dtype=torch.bool, device=img.device) for gt_mask in gt_masks ] x = self.extract_feat(img) losses = dict() # CondInst and YOLACT have bbox_head if self.bbox_head: # bbox_head_preds is a tuple bbox_head_preds = self.bbox_head(x) # positive_infos is a list of obj:`InstanceData` # It contains the information about the positive samples # CondInst, YOLACT det_losses, positive_infos = self.bbox_head.loss( *bbox_head_preds, gt_bboxes=gt_bboxes, gt_labels=gt_labels, gt_masks=gt_masks, img_metas=img_metas, gt_bboxes_ignore=gt_bboxes_ignore, **kwargs) losses.update(det_losses) else: positive_infos = None mask_loss = self.mask_head.forward_train( x, gt_labels, gt_masks, img_metas, positive_infos=positive_infos, gt_bboxes=gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, **kwargs) # avoid loss override assert not set(mask_loss.keys()) & set(losses.keys()) losses.update(mask_loss) return losses def simple_test(self, img, img_metas, rescale=False): """Test function without test-time augmentation. Args: img (torch.Tensor): Images with shape (B, C, H, W). img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list(tuple): Formatted bbox and mask results of multiple \ images. The outer list corresponds to each image. \ Each tuple contains two type of results of single image: - bbox_results (list[np.ndarray]): BBox results of single image. The list corresponds to each class. each ndarray has a shape (N, 5), N is the number of bboxes with this category, and last dimension 5 arrange as (x1, y1, x2, y2, scores). - mask_results (list[np.ndarray]): Mask results of single image. The list corresponds to each class. each ndarray has a shape (N, img_h, img_w), N is the number of masks with this category. """ feat = self.extract_feat(img) if self.bbox_head: outs = self.bbox_head(feat) # results_list is list[obj:`InstanceData`] results_list = self.bbox_head.get_results( *outs, img_metas=img_metas, cfg=self.test_cfg, rescale=rescale) else: results_list = None results_list = self.mask_head.simple_test( feat, img_metas, rescale=rescale, instances_list=results_list) format_results_list = [] for results in results_list: format_results_list.append(self.format_results(results)) return format_results_list def format_results(self, results): """Format the model predictions according to the interface with dataset. Args: results (:obj:`InstanceData`): Processed results of single images. Usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,) - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). Returns: tuple: Formatted bbox and mask results.. 
It contains two items: - bbox_results (list[np.ndarray]): BBox results of single image. The list corresponds to each class. each ndarray has a shape (N, 5), N is the number of bboxes with this category, and last dimension 5 arrange as (x1, y1, x2, y2, scores). - mask_results (list[np.ndarray]): Mask results of single image. The list corresponds to each class. each ndarray has shape (N, img_h, img_w), N is the number of masks with this category. """ data_keys = results.keys() assert 'scores' in data_keys assert 'labels' in data_keys assert 'masks' in data_keys, \ 'results should contain ' \ 'masks when format the results ' mask_results = [[] for _ in range(self.mask_head.num_classes)] num_masks = len(results) if num_masks == 0: bbox_results = [ np.zeros((0, 5), dtype=np.float32) for _ in range(self.mask_head.num_classes) ] return bbox_results, mask_results labels = results.labels.detach().cpu().numpy() if 'bboxes' not in results: # create dummy bbox results to store the scores results.bboxes = results.scores.new_zeros(len(results), 4) det_bboxes = torch.cat([results.bboxes, results.scores[:, None]], dim=-1) det_bboxes = det_bboxes.detach().cpu().numpy() bbox_results = [ det_bboxes[labels == i, :] for i in range(self.mask_head.num_classes) ] masks = results.masks.detach().cpu().numpy() for idx in range(num_masks): mask = masks[idx] mask_results[labels[idx]].append(mask) return bbox_results, mask_results def aug_test(self, imgs, img_metas, rescale=False): raise NotImplementedError def show_result(self, img, result, score_thr=0.3, bbox_color=(72, 101, 241), text_color=(72, 101, 241), mask_color=None, thickness=2, font_size=13, win_name='', show=False, wait_time=0, out_file=None): """Draw `result` over `img`. Args: img (str or Tensor): The image to be displayed. result (tuple): Format bbox and mask results. It contains two items: - bbox_results (list[np.ndarray]): BBox results of single image. The list corresponds to each class. each ndarray has a shape (N, 5), N is the number of bboxes with this category, and last dimension 5 arrange as (x1, y1, x2, y2, scores). - mask_results (list[np.ndarray]): Mask results of single image. The list corresponds to each class. each ndarray has shape (N, img_h, img_w), N is the number of masks with this category. score_thr (float, optional): Minimum score of bboxes to be shown. Default: 0.3. bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. The tuple of color should be in BGR order. Default: 'green' text_color (str or tuple(int) or :obj:`Color`):Color of texts. The tuple of color should be in BGR order. Default: 'green' mask_color (None or str or tuple(int) or :obj:`Color`): Color of masks. The tuple of color should be in BGR order. Default: None thickness (int): Thickness of lines. Default: 2 font_size (int): Font size of texts. Default: 13 win_name (str): The window name. Default: '' wait_time (float): Value of waitKey param. Default: 0. show (bool): Whether to show the image. Default: False. out_file (str or None): The filename to write the image. Default: None. 
Returns: img (Tensor): Only if not `show` or `out_file` """ assert isinstance(result, tuple) bbox_result, mask_result = result bboxes = np.vstack(bbox_result) img = mmcv.imread(img) img = img.copy() labels = [ np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result) ] labels = np.concatenate(labels) if len(labels) == 0: bboxes = np.zeros([0, 5]) masks = np.zeros([0, 0, 0]) # draw segmentation masks else: masks = mmcv.concat_list(mask_result) if isinstance(masks[0], torch.Tensor): masks = torch.stack(masks, dim=0).detach().cpu().numpy() else: masks = np.stack(masks, axis=0) # dummy bboxes if bboxes[:, :4].sum() == 0: num_masks = len(bboxes) x_any = masks.any(axis=1) y_any = masks.any(axis=2) for idx in range(num_masks): x = np.where(x_any[idx, :])[0] y = np.where(y_any[idx, :])[0] if len(x) > 0 and len(y) > 0: bboxes[idx, :4] = np.array( [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=np.float32) # if out_file specified, do not show image in window if out_file is not None: show = False # draw bounding boxes img = imshow_det_bboxes( img, bboxes, labels, masks, class_names=self.CLASSES, score_thr=score_thr, bbox_color=bbox_color, text_color=text_color, mask_color=mask_color, thickness=thickness, font_size=font_size, win_name=win_name, show=show, wait_time=wait_time, out_file=out_file) if not (show or out_file): return img ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/solo.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage_instance_seg import SingleStageInstanceSegmentor @DETECTORS.register_module() class SOLO(SingleStageInstanceSegmentor): """`SOLO: Segmenting Objects by Locations `_ """ def __init__(self, backbone, neck=None, bbox_head=None, mask_head=None, train_cfg=None, test_cfg=None, init_cfg=None, pretrained=None): super().__init__( backbone=backbone, neck=neck, bbox_head=bbox_head, mask_head=mask_head, train_cfg=train_cfg, test_cfg=test_cfg, init_cfg=init_cfg, pretrained=pretrained) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/solov2.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage_instance_seg import SingleStageInstanceSegmentor @DETECTORS.register_module() class SOLOv2(SingleStageInstanceSegmentor): """`SOLOv2: Dynamic and Fast Instance Segmentation `_ """ def __init__(self, backbone, neck=None, bbox_head=None, mask_head=None, train_cfg=None, test_cfg=None, init_cfg=None, pretrained=None): super().__init__( backbone=backbone, neck=neck, bbox_head=bbox_head, mask_head=mask_head, train_cfg=train_cfg, test_cfg=test_cfg, init_cfg=init_cfg, pretrained=pretrained) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/sparse_rcnn.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
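# A short numpy sketch (toy mask, illustrative only) of the mask-to-bbox
# fallback in `show_result` above: when all boxes are dummy zeros, a tight
# box is recovered from each binary mask by projecting it onto the two axes.
import numpy as np

masks = np.zeros((1, 5, 5), dtype=bool)
masks[0, 1:4, 2:5] = True              # one blob covering rows 1-3, cols 2-4
x_any = masks.any(axis=1)              # (N, W): which columns are occupied
y_any = masks.any(axis=2)              # (N, H): which rows are occupied
x = np.where(x_any[0])[0]
y = np.where(y_any[0])[0]
bbox = np.array([x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=np.float32)
assert (bbox == np.array([2., 1., 5., 4.])).all()  # [x1, y1, x2, y2]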
from ..builder import DETECTORS from .two_stage import TwoStageDetector @DETECTORS.register_module() class SparseRCNN(TwoStageDetector): r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with Learnable Proposals `_""" def __init__(self, *args, **kwargs): super(SparseRCNN, self).__init__(*args, **kwargs) assert self.with_rpn, 'Sparse R-CNN and QueryInst ' \ 'do not support external proposals' def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None, **kwargs): """Forward function of SparseR-CNN and QueryInst in train stage. Args: img (Tensor): of shape (N, C, H, W) encoding input images. Typically these should be mean centered and std scaled. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see :class:`mmdet.datasets.pipelines.Collect`. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor): specify which bounding boxes can be ignored when computing the loss. gt_masks (List[Tensor], optional) : Segmentation masks for each box. This is required to train QueryInst. proposals (List[Tensor], optional): override rpn proposals with custom proposals. Use when `with_rpn` is False. Returns: dict[str, Tensor]: a dictionary of loss components """ assert proposals is None, 'Sparse R-CNN and QueryInst ' \ 'do not support external proposals' x = self.extract_feat(img) proposal_boxes, proposal_features, imgs_whwh = \ self.rpn_head.forward_train(x, img_metas) roi_losses = self.roi_head.forward_train( x, proposal_boxes, proposal_features, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=gt_bboxes_ignore, gt_masks=gt_masks, imgs_whwh=imgs_whwh) return roi_losses def simple_test(self, img, img_metas, rescale=False): """Test function without test time augmentation. Args: imgs (list[torch.Tensor]): List of multiple images img_metas (list[dict]): List of image information. rescale (bool): Whether to rescale the results. Defaults to False. Returns: list[list[np.ndarray]]: BBox results of each image and classes. The outer list corresponds to each image. The inner list corresponds to each class. """ x = self.extract_feat(img) proposal_boxes, proposal_features, imgs_whwh = \ self.rpn_head.simple_test_rpn(x, img_metas) results = self.roi_head.simple_test( x, proposal_boxes, proposal_features, img_metas, imgs_whwh=imgs_whwh, rescale=rescale) return results def forward_dummy(self, img): """Used for computing network flops. See `mmdetection/tools/analysis_tools/get_flops.py` """ # backbone x = self.extract_feat(img) # rpn num_imgs = len(img) dummy_img_metas = [ dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs) ] proposal_boxes, proposal_features, imgs_whwh = \ self.rpn_head.simple_test_rpn(x, dummy_img_metas) # roi_head roi_outs = self.roi_head.forward_dummy(x, proposal_boxes, proposal_features, dummy_img_metas) return roi_outs ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/tood.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
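# Several `simple_test` methods in these detectors return
# `bbox2result(det_bboxes, det_labels, num_classes)`. A numpy sketch of that
# per-class grouping (a re-implementation for illustration, not the
# mmdet.core function itself, which operates on torch tensors):
import numpy as np

def to_per_class(bboxes, labels, num_classes):
    # split (N, 5) rows of [x1, y1, x2, y2, score] into one array per class
    if bboxes.shape[0] == 0:
        return [np.zeros((0, 5), dtype=np.float32) for _ in range(num_classes)]
    return [bboxes[labels == i, :] for i in range(num_classes)]

dets = np.array([[0., 0., 10., 10., .9], [5., 5., 20., 20., .8]])
labels = np.array([0, 2])
per_class = to_per_class(dets, labels, num_classes=3)
assert len(per_class) == 3 and per_class[1].shape == (0, 5)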
from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class TOOD(SingleStageDetector): r"""Implementation of `TOOD: Task-aligned One-stage Object Detection. `_.""" def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(TOOD, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) def set_epoch(self, epoch): self.bbox_head.epoch = epoch ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/trident_faster_rcnn.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .faster_rcnn import FasterRCNN @DETECTORS.register_module() class TridentFasterRCNN(FasterRCNN): """Implementation of `TridentNet `_""" def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None): super(TridentFasterRCNN, self).__init__( backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg) assert self.backbone.num_branch == self.roi_head.num_branch assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx self.num_branch = self.backbone.num_branch self.test_branch_idx = self.backbone.test_branch_idx def simple_test(self, img, img_metas, proposals=None, rescale=False): """Test without augmentation.""" assert self.with_bbox, 'Bbox head must be implemented.' x = self.extract_feat(img) if proposals is None: num_branch = (self.num_branch if self.test_branch_idx == -1 else 1) trident_img_metas = img_metas * num_branch proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas) else: proposal_list = proposals trident_img_metas = img_metas # fall back to the original metas so `trident_img_metas` is defined when external proposals are given return self.roi_head.simple_test( x, proposal_list, trident_img_metas, rescale=rescale) def aug_test(self, imgs, img_metas, rescale=False): """Test with augmentations. If rescale is False, then returned bboxes and masks will fit the scale of imgs[0]. """ x = self.extract_feats(imgs) num_branch = (self.num_branch if self.test_branch_idx == -1 else 1) trident_img_metas = [img_metas * num_branch for img_metas in img_metas] proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas) return self.roi_head.aug_test( x, proposal_list, img_metas, rescale=rescale) def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs): """Make copies of img and gts to fit multi-branch.""" trident_gt_bboxes = tuple(gt_bboxes * self.num_branch) trident_gt_labels = tuple(gt_labels * self.num_branch) trident_img_metas = tuple(img_metas * self.num_branch) return super(TridentFasterRCNN, self).forward_train(img, trident_img_metas, trident_gt_bboxes, trident_gt_labels) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/two_stage.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings import torch from ..builder import DETECTORS, build_backbone, build_head, build_neck from .base import BaseDetector @DETECTORS.register_module() class TwoStageDetector(BaseDetector): """Base class for two-stage detectors. Two-stage detectors typically consist of a region proposal network and a task-specific regression head.
""" def __init__(self, backbone, neck=None, rpn_head=None, roi_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(TwoStageDetector, self).__init__(init_cfg) if pretrained: warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') backbone.pretrained = pretrained self.backbone = build_backbone(backbone) if neck is not None: self.neck = build_neck(neck) if rpn_head is not None: rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None rpn_head_ = rpn_head.copy() rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn) self.rpn_head = build_head(rpn_head_) if roi_head is not None: # update train and test cfg here for now # TODO: refactor assigner & sampler rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None roi_head.update(train_cfg=rcnn_train_cfg) roi_head.update(test_cfg=test_cfg.rcnn) roi_head.pretrained = pretrained self.roi_head = build_head(roi_head) self.train_cfg = train_cfg self.test_cfg = test_cfg @property def with_rpn(self): """bool: whether the detector has RPN""" return hasattr(self, 'rpn_head') and self.rpn_head is not None @property def with_roi_head(self): """bool: whether the detector has a RoI head""" return hasattr(self, 'roi_head') and self.roi_head is not None def extract_feat(self, img): """Directly extract features from the backbone+neck.""" x = self.backbone(img) if self.with_neck: x = self.neck(x) return x def forward_dummy(self, img): """Used for computing network flops. See `mmdetection/tools/analysis_tools/get_flops.py` """ outs = () # backbone x = self.extract_feat(img) # rpn if self.with_rpn: rpn_outs = self.rpn_head(x) outs = outs + (rpn_outs, ) proposals = torch.randn(1000, 4).to(img.device) # roi_head roi_outs = self.roi_head.forward_dummy(x, proposals) outs = outs + (roi_outs, ) return outs def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None, **kwargs): """ Args: img (Tensor): of shape (N, C, H, W) encoding input images. Typically these should be mean centered and std scaled. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. proposals : override rpn proposals with custom proposals. Use when `with_rpn` is False. 
Returns: dict[str, Tensor]: a dictionary of loss components """ x = self.extract_feat(img) losses = dict() # RPN forward and loss if self.with_rpn: proposal_cfg = self.train_cfg.get('rpn_proposal', self.test_cfg.rpn) rpn_losses, proposal_list = self.rpn_head.forward_train( x, img_metas, gt_bboxes, gt_labels=None, gt_bboxes_ignore=gt_bboxes_ignore, proposal_cfg=proposal_cfg, **kwargs) losses.update(rpn_losses) else: proposal_list = proposals roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore, gt_masks, **kwargs) losses.update(roi_losses) return losses async def async_simple_test(self, img, img_meta, proposals=None, rescale=False): """Async test without augmentation.""" assert self.with_bbox, 'Bbox head must be implemented.' x = self.extract_feat(img) if proposals is None: proposal_list = await self.rpn_head.async_simple_test_rpn( x, img_meta) else: proposal_list = proposals return await self.roi_head.async_simple_test( x, proposal_list, img_meta, rescale=rescale) def simple_test(self, img, img_metas, proposals=None, rescale=False): """Test without augmentation.""" assert self.with_bbox, 'Bbox head must be implemented.' x = self.extract_feat(img) if proposals is None: proposal_list = self.rpn_head.simple_test_rpn(x, img_metas) else: proposal_list = proposals return self.roi_head.simple_test( x, proposal_list, img_metas, rescale=rescale) def aug_test(self, imgs, img_metas, rescale=False): """Test with augmentations. If rescale is False, then returned bboxes and masks will fit the scale of imgs[0]. """ x = self.extract_feats(imgs) proposal_list = self.rpn_head.aug_test_rpn(x, img_metas) return self.roi_head.aug_test( x, proposal_list, img_metas, rescale=rescale) def onnx_export(self, img, img_metas): img_shape = torch._shape_as_tensor(img)[2:] img_metas[0]['img_shape_for_onnx'] = img_shape x = self.extract_feat(img) proposals = self.rpn_head.onnx_export(x, img_metas) if hasattr(self.roi_head, 'onnx_export'): return self.roi_head.onnx_export(x, proposals, img_metas) else: raise NotImplementedError( f'{self.__class__.__name__} can not ' f'be exported to ONNX. Please refer to the ' f'list of supported models,' f'https://mmdetection.readthedocs.io/en/latest/tutorials/pytorch2onnx.html#list-of-supported-models-exportable-to-onnx' # noqa E501 ) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/vfnet.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class VFNet(SingleStageDetector): """Implementation of `VarifocalNet (VFNet).`_""" def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(VFNet, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/yolact.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
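# `onnx_export` above stores the input's spatial size as a tensor so the
# exported graph stays dynamic in height and width. A tiny sketch of that
# trick (`torch._shape_as_tensor` is a private but real torch helper):
import torch

img = torch.rand(1, 3, 480, 640)              # (N, C, H, W)
img_shape = torch._shape_as_tensor(img)[2:]   # tensor([480, 640]), traceable
assert img_shape.tolist() == [480, 640]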
import torch from mmdet.core import bbox2result from ..builder import DETECTORS, build_head from .single_stage import SingleStageDetector @DETECTORS.register_module() class YOLACT(SingleStageDetector): """Implementation of `YOLACT `_""" def __init__(self, backbone, neck, bbox_head, segm_head, mask_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) self.segm_head = build_head(segm_head) self.mask_head = build_head(mask_head) def forward_dummy(self, img): """Used for computing network flops. See `mmdetection/tools/analysis_tools/get_flops.py` """ feat = self.extract_feat(img) bbox_outs = self.bbox_head(feat) prototypes = self.mask_head.forward_dummy(feat[0]) return (bbox_outs, prototypes) def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None): """ Args: img (Tensor): of shape (N, C, H, W) encoding input images. Typically these should be mean centered and std scaled. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. Returns: dict[str, Tensor]: a dictionary of loss components """ # convert Bitmap mask or Polygon Mask to Tensor here gt_masks = [ gt_mask.to_tensor(dtype=torch.uint8, device=img.device) for gt_mask in gt_masks ] x = self.extract_feat(img) cls_score, bbox_pred, coeff_pred = self.bbox_head(x) bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels, img_metas) losses, sampling_results = self.bbox_head.loss( *bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) segm_head_outs = self.segm_head(x[0]) loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels) losses.update(loss_segm) mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas, sampling_results) loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes, img_metas, sampling_results) losses.update(loss_mask) # check NaN and Inf for loss_name in losses.keys(): assert torch.isfinite(torch.stack(losses[loss_name]))\ .all().item(), '{} becomes infinite or NaN!'\ .format(loss_name) return losses def simple_test(self, img, img_metas, rescale=False): """Test function without test-time augmentation.""" feat = self.extract_feat(img) det_bboxes, det_labels, det_coeffs = self.bbox_head.simple_test( feat, img_metas, rescale=rescale) bbox_results = [ bbox2result(det_bbox, det_label, self.bbox_head.num_classes) for det_bbox, det_label in zip(det_bboxes, det_labels) ] segm_results = self.mask_head.simple_test( feat, det_bboxes, det_labels, det_coeffs, img_metas, rescale=rescale) return list(zip(bbox_results, segm_results)) def aug_test(self, imgs, img_metas, rescale=False): """Test with augmentations.""" raise NotImplementedError( 'YOLACT does not support test-time augmentation') ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/yolo.py 
================================================ # Copyright (c) OpenMMLab. All rights reserved. # Copyright (c) 2019 Western Digital Corporation or its affiliates. import torch from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class YOLOV3(SingleStageDetector): def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) def onnx_export(self, img, img_metas): """Test function for exporting to ONNX, without test time augmentation. Args: img (torch.Tensor): input images. img_metas (list[dict]): List of image information. Returns: tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] and class labels of shape [N, num_det]. """ x = self.extract_feat(img) outs = self.bbox_head.forward(x) # get shape as tensor img_shape = torch._shape_as_tensor(img)[2:] img_metas[0]['img_shape_for_onnx'] = img_shape det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas) return det_bboxes, det_labels ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/yolof.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class YOLOF(SingleStageDetector): r"""Implementation of `You Only Look One-level Feature `_""" def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None): super(YOLOF, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/yolox.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import random import torch import torch.distributed as dist import torch.nn.functional as F from mmcv.runner import get_dist_info from ...utils import log_img_scale from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class YOLOX(SingleStageDetector): r"""Implementation of `YOLOX: Exceeding YOLO Series in 2021 `_ Note: Considering the trade-off between training speed and accuracy, multi-scale training is temporarily kept. More elegant implementation will be adopted in the future. Args: backbone (nn.Module): The backbone module. neck (nn.Module): The neck module. bbox_head (nn.Module): The bbox head module. train_cfg (obj:`ConfigDict`, optional): The training config of YOLOX. Default: None. test_cfg (obj:`ConfigDict`, optional): The testing config of YOLOX. Default: None. pretrained (str, optional): model pretrained path. Default: None. input_size (tuple): The model default input image size. The shape order should be (height, width). Default: (640, 640). size_multiplier (int): Image size multiplication factor. Default: 32. random_size_range (tuple): The multi-scale random range during multi-scale training. The real training image size will be multiplied by size_multiplier. Default: (15, 25). random_size_interval (int): The iter interval of change image size. Default: 10. init_cfg (dict, optional): Initialization config dict. Default: None. 
""" def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, input_size=(640, 640), size_multiplier=32, random_size_range=(15, 25), random_size_interval=10, init_cfg=None): super(YOLOX, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) log_img_scale(input_size, skip_square=True) self.rank, self.world_size = get_dist_info() self._default_input_size = input_size self._input_size = input_size self._random_size_range = random_size_range self._random_size_interval = random_size_interval self._size_multiplier = size_multiplier self._progress_in_iter = 0 def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None): """ Args: img (Tensor): Input images of shape (N, C, H, W). Typically these should be mean centered and std scaled. img_metas (list[dict]): A List of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see :class:`mmdet.datasets.pipelines.Collect`. gt_bboxes (list[Tensor]): Each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): Class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ # Multi-scale training img, gt_bboxes = self._preprocess(img, gt_bboxes) losses = super(YOLOX, self).forward_train(img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore) # random resizing if (self._progress_in_iter + 1) % self._random_size_interval == 0: self._input_size = self._random_resize(device=img.device) self._progress_in_iter += 1 return losses def _preprocess(self, img, gt_bboxes): scale_y = self._input_size[0] / self._default_input_size[0] scale_x = self._input_size[1] / self._default_input_size[1] if scale_x != 1 or scale_y != 1: img = F.interpolate( img, size=self._input_size, mode='bilinear', align_corners=False) for gt_bbox in gt_bboxes: gt_bbox[..., 0::2] = gt_bbox[..., 0::2] * scale_x gt_bbox[..., 1::2] = gt_bbox[..., 1::2] * scale_y return img, gt_bboxes def _random_resize(self, device): tensor = torch.LongTensor(2).to(device) if self.rank == 0: size = random.randint(*self._random_size_range) aspect_ratio = float( self._default_input_size[1]) / self._default_input_size[0] size = (self._size_multiplier * size, self._size_multiplier * int(aspect_ratio * size)) tensor[0] = size[0] tensor[1] = size[1] if self.world_size > 1: dist.barrier() dist.broadcast(tensor, 0) input_size = (tensor[0].item(), tensor[1].item()) return input_size ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
from .accuracy import Accuracy, accuracy from .ae_loss import AssociativeEmbeddingLoss from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy, cross_entropy, mask_cross_entropy) from .dice_loss import DiceLoss from .focal_loss import FocalLoss, sigmoid_focal_loss from .gaussian_focal_loss import GaussianFocalLoss from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss from .ghm_loss import GHMC, GHMR from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, GIoULoss, IoULoss, bounded_iou_loss, iou_loss) from .kd_loss import KnowledgeDistillationKLDivLoss from .mse_loss import MSELoss, mse_loss from .pisa_loss import carl_loss, isr_p from .seesaw_loss import SeesawLoss from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss from .utils import reduce_loss, weight_reduce_loss, weighted_loss from .varifocal_loss import VarifocalLoss __all__ = [ 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy', 'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss', 'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss', 'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss', 'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss', 'GHMC', 'GHMR', 'reduce_loss', 'weight_reduce_loss', 'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p', 'carl_loss', 'AssociativeEmbeddingLoss', 'GaussianFocalLoss', 'QualityFocalLoss', 'DistributionFocalLoss', 'VarifocalLoss', 'KnowledgeDistillationKLDivLoss', 'SeesawLoss', 'DiceLoss' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/accuracy.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch.nn as nn @mmcv.jit(coderize=True) def accuracy(pred, target, topk=1, thresh=None): """Calculate accuracy according to the prediction and target. Args: pred (torch.Tensor): The model prediction, shape (N, num_class) target (torch.Tensor): The target of each prediction, shape (N, ) topk (int | tuple[int], optional): If the predictions in ``topk`` matches the target, the predictions will be regarded as correct ones. Defaults to 1. thresh (float, optional): If not None, predictions with scores under this threshold are considered incorrect. Default to None. Returns: float | tuple[float]: If the input ``topk`` is a single integer, the function will return a single float as accuracy. If ``topk`` is a tuple containing multiple integers, the function will return a tuple containing accuracies of each ``topk`` number. """ assert isinstance(topk, (int, tuple)) if isinstance(topk, int): topk = (topk, ) return_single = True else: return_single = False maxk = max(topk) if pred.size(0) == 0: accu = [pred.new_tensor(0.) 
for i in range(len(topk))]
        return accu[0] if return_single else accu
    assert pred.ndim == 2 and target.ndim == 1
    assert pred.size(0) == target.size(0)
    assert maxk <= pred.size(1), \
        f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
    pred_value, pred_label = pred.topk(maxk, dim=1)
    pred_label = pred_label.t()  # transpose to shape (maxk, N)
    correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
    if thresh is not None:
        # Only prediction values larger than thresh are counted as correct
        correct = correct & (pred_value > thresh).t()
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / pred.size(0)))
    return res[0] if return_single else res


class Accuracy(nn.Module):

    def __init__(self, topk=(1, ), thresh=None):
        """Module to calculate the accuracy.

        Args:
            topk (tuple, optional): The criterion used to calculate the
                accuracy. Defaults to (1,).
            thresh (float, optional): If not None, predictions with scores
                under this threshold are considered incorrect. Defaults to
                None.
        """
        super().__init__()
        self.topk = topk
        self.thresh = thresh

    def forward(self, pred, target):
        """Forward function to calculate accuracy.

        Args:
            pred (torch.Tensor): Prediction of models.
            target (torch.Tensor): Target for each prediction.

        Returns:
            tuple[float]: The accuracies under different topk criterions.
        """
        return accuracy(pred, target, self.topk, self.thresh)
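# ----------------------------------------------------------------------
# A small numeric sketch of the top-k computation in `accuracy` above,
# using toy logits: top-1 counts only the argmax, top-2 also accepts the
# runner-up, and both are reported as percentages of the batch.
import torch

pred = torch.tensor([[0.1, 0.7, 0.2],     # argmax = 1
                     [0.3, 0.6, 0.1],     # argmax = 1, runner-up = 0
                     [0.2, 0.3, 0.5]])    # argmax = 2
target = torch.tensor([1, 0, 2])

_, pred_label = pred.topk(2, dim=1)                # shape (N, maxk)
correct = pred_label.t().eq(target.view(1, -1))    # shape (maxk, N)
top1 = correct[:1].reshape(-1).float().sum() * 100.0 / pred.size(0)
top2 = correct[:2].reshape(-1).float().sum() * 100.0 / pred.size(0)
print(top1.item(), top2.item())   # 66.67 (2 of 3 correct), 100.0
# ----------------------------------------------------------------------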
================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/ae_loss.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F

from ..builder import LOSSES


@mmcv.jit(derivate=True, coderize=True)
def ae_loss_per_image(tl_preds, br_preds, match):
    """Associative Embedding Loss in one image.

    Associative Embedding Loss includes two parts: pull loss and push loss.
    Pull loss makes embedding vectors from the same object closer to each
    other. Push loss distinguishes embedding vectors from different objects
    and makes the gap between them large enough.

    During computation, there are usually 3 cases:
        - no object in image: both pull loss and push loss will be 0.
        - one object in image: push loss will be 0 and pull loss is computed
            by the two corners of the only object.
        - more than one object in image: pull loss is computed by corner pairs
            from each object, push loss is computed by each object with all
            other objects. We use a confusion matrix with 0 on the diagonal to
            compute the push loss.

    Args:
        tl_preds (tensor): Embedding feature map of the top-left corner.
        br_preds (tensor): Embedding feature map of the bottom-right corner.
        match (list): Downsampled coordinates pair of each ground truth box.
    """

    tl_list, br_list, me_list = [], [], []
    if len(match) == 0:  # no object in image
        pull_loss = tl_preds.sum() * 0.
        push_loss = tl_preds.sum() * 0.
    else:
        for m in match:
            [tl_y, tl_x], [br_y, br_x] = m
            tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)
            br_e = br_preds[:, br_y, br_x].view(-1, 1)
            tl_list.append(tl_e)
            br_list.append(br_e)
            me_list.append((tl_e + br_e) / 2.0)

        tl_list = torch.cat(tl_list)
        br_list = torch.cat(br_list)
        me_list = torch.cat(me_list)

        assert tl_list.size() == br_list.size()

        # N is object number in image, M is dimension of embedding vector
        N, M = tl_list.size()

        pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2)
        pull_loss = pull_loss.sum() / N

        margin = 1  # exp setting of CornerNet, details in section 3.3 of paper

        # confusion matrix of push loss
        conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list
        conf_weight = 1 - torch.eye(N).type_as(me_list)
        conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())

        if N > 1:  # more than one object in current image
            push_loss = F.relu(conf_mat).sum() / (N * (N - 1))
        else:
            push_loss = tl_preds.sum() * 0.

    return pull_loss, push_loss


@LOSSES.register_module()
class AssociativeEmbeddingLoss(nn.Module):
    """Associative Embedding Loss.

    More details can be found in
    `Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and
    `CornerNet <https://arxiv.org/abs/1808.01244>`_.
    Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py>`_  # noqa: E501

    Args:
        pull_weight (float): Loss weight for corners from the same object.
        push_weight (float): Loss weight for corners from different objects.
    """

    def __init__(self, pull_weight=0.25, push_weight=0.25):
        super(AssociativeEmbeddingLoss, self).__init__()
        self.pull_weight = pull_weight
        self.push_weight = push_weight

    def forward(self, pred, target, match):
        """Forward function."""
        batch = pred.size(0)
        pull_all, push_all = 0.0, 0.0
        for i in range(batch):
            pull, push = ae_loss_per_image(pred[i], target[i], match[i])

            pull_all += self.pull_weight * pull
            push_all += self.push_weight * push

        return pull_all, push_all



================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/balanced_l1_loss.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
import torch.nn as nn

from ..builder import LOSSES
from .utils import weighted_loss


@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def balanced_l1_loss(pred,
                     target,
                     beta=1.0,
                     alpha=0.5,
                     gamma=1.5,
                     reduction='mean'):
    """Calculate balanced L1 loss.

    Please see the `Libra R-CNN <https://arxiv.org/abs/1904.02701>`_ paper.

    Args:
        pred (torch.Tensor): The prediction with shape (N, 4).
        target (torch.Tensor): The learning target of the prediction with
            shape (N, 4).
        beta (float): The loss is a piecewise function of prediction and
            target and ``beta`` serves as a threshold for the difference
            between the prediction and target. Defaults to 1.0.
        alpha (float): The denominator ``alpha`` in the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): The ``gamma`` in the balanced L1 loss.
            Defaults to 1.5.
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".

    Returns:
        torch.Tensor: The calculated loss
    """
    assert beta > 0
    if target.numel() == 0:
        return pred.sum() * 0

    assert pred.size() == target.size()

    diff = torch.abs(pred - target)
    b = np.e**(gamma / alpha) - 1
    loss = torch.where(
        diff < beta, alpha / b *
        (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
        gamma * diff + gamma / b - alpha * beta)

    return loss


@LOSSES.register_module()
class BalancedL1Loss(nn.Module):
    """Balanced L1 Loss.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Args:
        alpha (float): The denominator ``alpha`` in the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): The ``gamma`` in the balanced L1 loss.
            Defaults to 1.5.
        beta (float, optional): The loss is a piecewise function of prediction
            and target. ``beta`` serves as a threshold for the difference
            between the prediction and target. Defaults to 1.0.
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of the loss. Defaults to
            1.0.
    """

    def __init__(self,
                 alpha=0.5,
                 gamma=1.5,
                 beta=1.0,
                 reduction='mean',
                 loss_weight=1.0):
        super(BalancedL1Loss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Forward function of loss.

        Args:
            pred (torch.Tensor): The prediction with shape (N, 4).
            target (torch.Tensor): The learning target of the prediction with
                shape (N, 4).
            weight (torch.Tensor, optional): Sample-wise loss weight with
                shape (N, ).
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        loss_bbox = self.loss_weight * balanced_l1_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss_bbox



================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/cross_entropy_loss.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import torch
import torch.nn as nn
import torch.nn.functional as F

from ..builder import LOSSES
from .utils import weight_reduce_loss


def cross_entropy(pred,
                  label,
                  weight=None,
                  reduction='mean',
                  avg_factor=None,
                  class_weight=None,
                  ignore_index=-100,
                  avg_non_ignore=False):
    """Calculate the CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (int | None): The label index to be ignored.
            If None, it will be set to default value. Default: -100.
        avg_non_ignore (bool): Whether the loss is only averaged over
            non-ignored targets. Default: False.
    Returns:
        torch.Tensor: The calculated loss
    """
    # The default value of ignore_index is the same as F.cross_entropy
    ignore_index = -100 if ignore_index is None else ignore_index
    # element-wise losses
    loss = F.cross_entropy(
        pred,
        label,
        weight=class_weight,
        reduction='none',
        ignore_index=ignore_index)

    # average loss over non-ignored elements
    # pytorch's official cross_entropy averages the loss over non-ignored
    # elements, refer to
    # https://github.com/pytorch/pytorch/blob/56b43f4fec1f76953f15a627694d4bba34588969/torch/nn/functional.py#L2660  # noqa
    if (avg_factor is None) and avg_non_ignore and reduction == 'mean':
        avg_factor = label.numel() - (label == ignore_index).sum().item()

    # apply weights and do the reduction
    if weight is not None:
        weight = weight.float()
    loss = weight_reduce_loss(
        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)

    return loss


def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):
    """Expand onehot labels to match the size of prediction."""
    bin_labels = labels.new_full((labels.size(0), label_channels), 0)
    valid_mask = (labels >= 0) & (labels != ignore_index)
    inds = torch.nonzero(
        valid_mask & (labels < label_channels), as_tuple=False)

    if inds.numel() > 0:
        bin_labels[inds, labels[inds]] = 1

    valid_mask = valid_mask.view(-1, 1).expand(labels.size(0),
                                               label_channels).float()
    if label_weights is None:
        bin_label_weights = valid_mask
    else:
        bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels)
        bin_label_weights *= valid_mask

    return bin_labels, bin_label_weights, valid_mask
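# ----------------------------------------------------------------------
# A toy sketch of the `avg_non_ignore` behaviour implemented in
# `cross_entropy` above: per-element losses are computed with
# reduction='none', ignored positions contribute zero, and the sum is
# divided either by the full element count or only by the non-ignored
# count. Plain PyTorch only; all numbers are made up.
import torch
import torch.nn.functional as F

pred = torch.randn(4, 3)                   # toy logits (N, C)
label = torch.tensor([0, 2, -100, 1])      # -100 = ignore_index
ignore_index = -100

loss = F.cross_entropy(
    pred, label, reduction='none', ignore_index=ignore_index)

avg_all = loss.sum() / label.numel()                        # avg_non_ignore=False
num_valid = label.numel() - (label == ignore_index).sum()   # avg_factor
avg_non_ignored = loss.sum() / num_valid                    # avg_non_ignore=True
print(avg_all.item(), avg_non_ignored.item())
# ----------------------------------------------------------------------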
def binary_cross_entropy(pred,
                         label,
                         weight=None,
                         reduction='mean',
                         avg_factor=None,
                         class_weight=None,
                         ignore_index=-100,
                         avg_non_ignore=False):
    """Calculate the binary CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, 1) or (N, ).
            When the shape of pred is (N, 1), label will be expanded to
            one-hot format, and when the shape of pred is (N, ), label
            will not be expanded to one-hot format.
        label (torch.Tensor): The learning label of the prediction,
            with shape (N, ).
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (int | None): The label index to be ignored.
            If None, it will be set to default value. Default: -100.
        avg_non_ignore (bool): Whether the loss is only averaged over
            non-ignored targets. Default: False.

    Returns:
        torch.Tensor: The calculated loss.
    """
    # The default value of ignore_index is the same as F.cross_entropy
    ignore_index = -100 if ignore_index is None else ignore_index

    if pred.dim() != label.dim():
        label, weight, valid_mask = _expand_onehot_labels(
            label, weight, pred.size(-1), ignore_index)
    else:
        # should mask out the ignored elements
        valid_mask = ((label >= 0) & (label != ignore_index)).float()
        if weight is not None:
            # The inplace writing method will have a mismatched broadcast
            # shape error if the weight and valid_mask dimensions
            # are inconsistent such as (B,N,1) and (B,N,C).
            weight = weight * valid_mask
        else:
            weight = valid_mask

    # average loss over non-ignored elements
    if (avg_factor is None) and avg_non_ignore and reduction == 'mean':
        avg_factor = valid_mask.sum().item()

    # weighted element-wise losses
    weight = weight.float()
    loss = F.binary_cross_entropy_with_logits(
        pred, label.float(), pos_weight=class_weight, reduction='none')
    # do the reduction for the weighted loss
    loss = weight_reduce_loss(
        loss, weight, reduction=reduction, avg_factor=avg_factor)

    return loss


def mask_cross_entropy(pred,
                       target,
                       label,
                       reduction='mean',
                       avg_factor=None,
                       class_weight=None,
                       ignore_index=None,
                       **kwargs):
    """Calculate the CrossEntropy loss for masks.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C, *), C is the
            number of classes. The trailing * indicates arbitrary shape.
        target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): ``label`` indicates the class label of the
            object each mask corresponds to. It is used to select the mask of
            the class the object belongs to when the mask prediction is not
            class-agnostic.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (None): Placeholder, to be consistent with other loss.
            Default: None.

    Returns:
        torch.Tensor: The calculated loss

    Example:
        >>> N, C = 3, 11
        >>> H, W = 2, 2
        >>> pred = torch.randn(N, C, H, W) * 1000
        >>> target = torch.rand(N, H, W)
        >>> label = torch.randint(0, C, size=(N,))
        >>> reduction = 'mean'
        >>> avg_factor = None
        >>> class_weights = None
        >>> loss = mask_cross_entropy(pred, target, label, reduction,
        >>>                           avg_factor, class_weights)
        >>> assert loss.shape == (1,)
    """
    assert ignore_index is None, 'BCE loss does not support ignore_index'
    # TODO: handle these two reserved arguments
    assert reduction == 'mean' and avg_factor is None
    num_rois = pred.size()[0]
    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    pred_slice = pred[inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(
        pred_slice, target, weight=class_weight, reduction='mean')[None]


@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):

    def __init__(self,
                 use_sigmoid=False,
                 use_mask=False,
                 reduction='mean',
                 class_weight=None,
                 ignore_index=None,
                 loss_weight=1.0,
                 avg_non_ignore=False):
        """CrossEntropyLoss.

        Args:
            use_sigmoid (bool, optional): Whether the prediction uses sigmoid
                instead of softmax. Defaults to False.
            use_mask (bool, optional): Whether to use mask cross entropy loss.
                Defaults to False.
            reduction (str, optional): The method used to reduce the loss.
                Options are "none", "mean" and "sum". Defaults to 'mean'.
            class_weight (list[float], optional): Weight of each class.
                Defaults to None.
            ignore_index (int | None): The label index to be ignored.
                Defaults to None.
            loss_weight (float, optional): Weight of the loss.
                Defaults to 1.0.
            avg_non_ignore (bool): Whether the loss is only averaged over
                non-ignored targets. Default: False.
""" super(CrossEntropyLoss, self).__init__() assert (use_sigmoid is False) or (use_mask is False) self.use_sigmoid = use_sigmoid self.use_mask = use_mask self.reduction = reduction self.loss_weight = loss_weight self.class_weight = class_weight self.ignore_index = ignore_index self.avg_non_ignore = avg_non_ignore if ((ignore_index is not None) and not self.avg_non_ignore and self.reduction == 'mean'): warnings.warn( 'Default ``avg_non_ignore`` is False, if you would like to ' 'ignore the certain label and average loss over non-ignore ' 'labels, which is the same with PyTorch official ' 'cross_entropy, set ``avg_non_ignore=True``.') if self.use_sigmoid: self.cls_criterion = binary_cross_entropy elif self.use_mask: self.cls_criterion = mask_cross_entropy else: self.cls_criterion = cross_entropy def extra_repr(self): """Extra repr.""" s = f'avg_non_ignore={self.avg_non_ignore}' return s def forward(self, cls_score, label, weight=None, avg_factor=None, reduction_override=None, ignore_index=None, **kwargs): """Forward function. Args: cls_score (torch.Tensor): The prediction. label (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): Sample-wise loss weight. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The method used to reduce the loss. Options are "none", "mean" and "sum". ignore_index (int | None): The label index to be ignored. If not None, it will override the default value. Default: None. Returns: torch.Tensor: The calculated loss. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if ignore_index is None: ignore_index = self.ignore_index if self.class_weight is not None: class_weight = cls_score.new_tensor( self.class_weight, device=cls_score.device) else: class_weight = None loss_cls = self.loss_weight * self.cls_criterion( cls_score, label, weight, class_weight=class_weight, reduction=reduction, avg_factor=avg_factor, ignore_index=ignore_index, avg_non_ignore=self.avg_non_ignore, **kwargs) return loss_cls ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/dice_loss.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from ..builder import LOSSES from .utils import weight_reduce_loss def dice_loss(pred, target, weight=None, eps=1e-3, reduction='mean', naive_dice=False, avg_factor=None): """Calculate dice loss, there are two forms of dice loss is supported: - the one proposed in `V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation `_. - the dice loss in which the power of the number in the denominator is the first power instead of the second power. Args: pred (torch.Tensor): The prediction, has a shape (n, *) target (torch.Tensor): The learning label of the prediction, shape (n, *), same shape of pred. weight (torch.Tensor, optional): The weight of loss for each prediction, has a shape (n,). Defaults to None. eps (float): Avoid dividing by zero. Default: 1e-3. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". 
        naive_dice (bool, optional): If False, use the dice loss defined in
            the V-Net paper; otherwise, use the naive dice loss in which the
            power of the number in the denominator is the first power instead
            of the second power. Defaults to False.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    input = pred.flatten(1)
    target = target.flatten(1).float()

    a = torch.sum(input * target, 1)
    if naive_dice:
        b = torch.sum(input, 1)
        c = torch.sum(target, 1)
        d = (2 * a + eps) / (b + c + eps)
    else:
        b = torch.sum(input * input, 1) + eps
        c = torch.sum(target * target, 1) + eps
        d = (2 * a) / (b + c)

    loss = 1 - d
    if weight is not None:
        assert weight.ndim == loss.ndim
        assert len(weight) == len(pred)
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss


@LOSSES.register_module()
class DiceLoss(nn.Module):

    def __init__(self,
                 use_sigmoid=True,
                 activate=True,
                 reduction='mean',
                 naive_dice=False,
                 loss_weight=1.0,
                 eps=1e-3):
        """Compute dice loss.

        Args:
            use_sigmoid (bool, optional): Whether sigmoid (rather than
                softmax) is used to activate the prediction.
                Defaults to True.
            activate (bool): Whether to apply the activation (sigmoid) to the
                predictions inside the loss; set to False to disable the
                internal sigmoid when the inputs are already activated.
                Defaults to True.
            reduction (str, optional): The method used to reduce the loss.
                Options are "none", "mean" and "sum". Defaults to 'mean'.
            naive_dice (bool, optional): If False, use the dice loss defined
                in the V-Net paper; otherwise, use the naive dice loss in
                which the power of the number in the denominator is the first
                power instead of the second power. Defaults to False.
            loss_weight (float, optional): Weight of loss. Defaults to 1.0.
            eps (float): Avoid dividing by zero. Defaults to 1e-3.
        """

        super(DiceLoss, self).__init__()
        self.use_sigmoid = use_sigmoid
        self.reduction = reduction
        self.naive_dice = naive_dice
        self.loss_weight = loss_weight
        self.eps = eps
        self.activate = activate

    def forward(self,
                pred,
                target,
                weight=None,
                reduction_override=None,
                avg_factor=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction, has a shape (n, *).
            target (torch.Tensor): The label of the prediction,
                has the same shape as pred.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction, has a shape (n,). Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss
        """

        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)

        if self.activate:
            if self.use_sigmoid:
                pred = pred.sigmoid()
            else:
                raise NotImplementedError

        loss = self.loss_weight * dice_loss(
            pred,
            target,
            weight,
            eps=self.eps,
            reduction=reduction,
            naive_dice=self.naive_dice,
            avg_factor=avg_factor)

        return loss



================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/focal_loss.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss

from ..builder import LOSSES
from .utils import weight_reduce_loss


# This method is only for debugging
def py_sigmoid_focal_loss(pred,
                          target,
                          weight=None,
                          gamma=2.0,
                          alpha=0.25,
                          reduction='mean',
                          avg_factor=None):
    """PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.
Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes target (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): Sample-wise loss weight. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. alpha (float, optional): A balanced form for Focal Loss. Defaults to 0.25. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. """ pred_sigmoid = pred.sigmoid() target = target.type_as(pred) pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma) loss = F.binary_cross_entropy_with_logits( pred, target, reduction='none') * focal_weight if weight is not None: if weight.shape != loss.shape: if weight.size(0) == loss.size(0): # For most cases, weight is of shape (num_priors, ), # which means it does not have the second axis num_class weight = weight.view(-1, 1) else: # Sometimes, weight per anchor per class is also needed. e.g. # in FSAF. But it may be flattened of shape # (num_priors x num_class, ), while loss is still of shape # (num_priors, num_class). assert weight.numel() == loss.numel() weight = weight.view(loss.size(0), -1) assert weight.ndim == loss.ndim loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss def py_focal_loss_with_prob(pred, target, weight=None, gamma=2.0, alpha=0.25, reduction='mean', avg_factor=None): """PyTorch version of `Focal Loss `_. Different from `py_sigmoid_focal_loss`, this function accepts probability as input. Args: pred (torch.Tensor): The prediction probability with shape (N, C), C is the number of classes. target (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): Sample-wise loss weight. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. alpha (float, optional): A balanced form for Focal Loss. Defaults to 0.25. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. """ num_classes = pred.size(1) target = F.one_hot(target, num_classes=num_classes + 1) target = target[:, :num_classes] target = target.type_as(pred) pt = (1 - pred) * target + pred * (1 - target) focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma) loss = F.binary_cross_entropy( pred, target, reduction='none') * focal_weight if weight is not None: if weight.shape != loss.shape: if weight.size(0) == loss.size(0): # For most cases, weight is of shape (num_priors, ), # which means it does not have the second axis num_class weight = weight.view(-1, 1) else: # Sometimes, weight per anchor per class is also needed. e.g. # in FSAF. But it may be flattened of shape # (num_priors x num_class, ), while loss is still of shape # (num_priors, num_class). assert weight.numel() == loss.numel() weight = weight.view(loss.size(0), -1) assert weight.ndim == loss.ndim loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss def sigmoid_focal_loss(pred, target, weight=None, gamma=2.0, alpha=0.25, reduction='mean', avg_factor=None): r"""A wrapper of cuda version `Focal Loss `_. Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes. target (torch.Tensor): The learning label of the prediction. 
weight (torch.Tensor, optional): Sample-wise loss weight. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. alpha (float, optional): A balanced form for Focal Loss. Defaults to 0.25. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. """ # Function.apply does not accept keyword arguments, so the decorator # "weighted_loss" is not applicable loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), gamma, alpha, None, 'none') if weight is not None: if weight.shape != loss.shape: if weight.size(0) == loss.size(0): # For most cases, weight is of shape (num_priors, ), # which means it does not have the second axis num_class weight = weight.view(-1, 1) else: # Sometimes, weight per anchor per class is also needed. e.g. # in FSAF. But it may be flattened of shape # (num_priors x num_class, ), while loss is still of shape # (num_priors, num_class). assert weight.numel() == loss.numel() weight = weight.view(loss.size(0), -1) assert weight.ndim == loss.ndim loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss @LOSSES.register_module() class FocalLoss(nn.Module): def __init__(self, use_sigmoid=True, gamma=2.0, alpha=0.25, reduction='mean', loss_weight=1.0, activated=False): """`Focal Loss `_ Args: use_sigmoid (bool, optional): Whether to the prediction is used for sigmoid or softmax. Defaults to True. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. alpha (float, optional): A balanced form for Focal Loss. Defaults to 0.25. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". loss_weight (float, optional): Weight of loss. Defaults to 1.0. activated (bool, optional): Whether the input is activated. If True, it means the input has been activated and can be treated as probabilities. Else, it should be treated as logits. Defaults to False. """ super(FocalLoss, self).__init__() assert use_sigmoid is True, 'Only sigmoid focal loss supported now.' self.use_sigmoid = use_sigmoid self.gamma = gamma self.alpha = alpha self.reduction = reduction self.loss_weight = loss_weight self.activated = activated def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Options are "none", "mean" and "sum". 
Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if self.use_sigmoid: if self.activated: calculate_loss_func = py_focal_loss_with_prob else: if torch.cuda.is_available() and pred.is_cuda: calculate_loss_func = sigmoid_focal_loss else: num_classes = pred.size(1) target = F.one_hot(target, num_classes=num_classes + 1) target = target[:, :num_classes] calculate_loss_func = py_sigmoid_focal_loss loss_cls = self.loss_weight * calculate_loss_func( pred, target, weight, gamma=self.gamma, alpha=self.alpha, reduction=reduction, avg_factor=avg_factor) else: raise NotImplementedError return loss_cls ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/gaussian_focal_loss.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch.nn as nn from ..builder import LOSSES from .utils import weighted_loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0): """`Focal Loss `_ for targets in gaussian distribution. Args: pred (torch.Tensor): The prediction. gaussian_target (torch.Tensor): The learning target of the prediction in gaussian distribution. alpha (float, optional): A balanced form for Focal Loss. Defaults to 2.0. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 4.0. """ eps = 1e-12 pos_weights = gaussian_target.eq(1) neg_weights = (1 - gaussian_target).pow(gamma) pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights return pos_loss + neg_loss @LOSSES.register_module() class GaussianFocalLoss(nn.Module): """GaussianFocalLoss is a variant of focal loss. More details can be found in the `paper `_ Code is modified from `kp_utils.py `_ # noqa: E501 Please notice that the target in GaussianFocalLoss is a gaussian heatmap, not 0/1 binary target. Args: alpha (float): Power of prediction. gamma (float): Power of target for negative samples. reduction (str): Options are "none", "mean" and "sum". loss_weight (float): Loss weight of current loss. """ def __init__(self, alpha=2.0, gamma=4.0, reduction='mean', loss_weight=1.0): super(GaussianFocalLoss, self).__init__() self.alpha = alpha self.gamma = gamma self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction in gaussian distribution. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. 
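# ----------------------------------------------------------------------
# A compact numeric sketch of the modulating factor in
# `py_sigmoid_focal_loss` above: easy examples (probability close to the
# binary target) get weight pt**gamma close to 0, while hard examples
# keep most of their BCE loss. Toy one-class logits; gamma and alpha use
# the defaults from the functions above.
import torch
import torch.nn.functional as F

pred = torch.tensor([3.0, -3.0, 0.1])    # logits: easy pos, easy neg, hard pos
target = torch.tensor([1.0, 0.0, 1.0])
gamma, alpha = 2.0, 0.25

p = pred.sigmoid()
pt = (1 - p) * target + p * (1 - target)          # probability of the wrong side
focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma)
bce = F.binary_cross_entropy_with_logits(pred, target, reduction='none')
print(bce)                 # plain BCE per element
print(bce * focal_weight)  # focal loss: easy examples strongly down-weighted
# ----------------------------------------------------------------------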
""" assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss_reg = self.loss_weight * gaussian_focal_loss( pred, target, weight, alpha=self.alpha, gamma=self.gamma, reduction=reduction, avg_factor=avg_factor) return loss_reg ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/gfocal_loss.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch.nn as nn import torch.nn.functional as F from ..builder import LOSSES from .utils import weighted_loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def quality_focal_loss(pred, target, beta=2.0): r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection `_. Args: pred (torch.Tensor): Predicted joint representation of classification and quality (IoU) estimation with shape (N, C), C is the number of classes. target (tuple([torch.Tensor])): Target category label with shape (N,) and target quality label with shape (N,). beta (float): The beta parameter for calculating the modulating factor. Defaults to 2.0. Returns: torch.Tensor: Loss tensor with shape (N,). """ assert len(target) == 2, """target for QFL must be a tuple of two elements, including category label and quality label, respectively""" # label denotes the category id, score denotes the quality score label, score = target # negatives are supervised by 0 quality score pred_sigmoid = pred.sigmoid() scale_factor = pred_sigmoid zerolabel = scale_factor.new_zeros(pred.shape) loss = F.binary_cross_entropy_with_logits( pred, zerolabel, reduction='none') * scale_factor.pow(beta) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = pred.size(1) pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1) pos_label = label[pos].long() # positives are supervised by bbox quality (IoU) score scale_factor = score[pos] - pred_sigmoid[pos, pos_label] loss[pos, pos_label] = F.binary_cross_entropy_with_logits( pred[pos, pos_label], score[pos], reduction='none') * scale_factor.abs().pow(beta) loss = loss.sum(dim=1, keepdim=False) return loss @weighted_loss def quality_focal_loss_with_prob(pred, target, beta=2.0): r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection `_. Different from `quality_focal_loss`, this function accepts probability as input. Args: pred (torch.Tensor): Predicted joint representation of classification and quality (IoU) estimation with shape (N, C), C is the number of classes. target (tuple([torch.Tensor])): Target category label with shape (N,) and target quality label with shape (N,). beta (float): The beta parameter for calculating the modulating factor. Defaults to 2.0. Returns: torch.Tensor: Loss tensor with shape (N,). 
""" assert len(target) == 2, """target for QFL must be a tuple of two elements, including category label and quality label, respectively""" # label denotes the category id, score denotes the quality score label, score = target # negatives are supervised by 0 quality score pred_sigmoid = pred scale_factor = pred_sigmoid zerolabel = scale_factor.new_zeros(pred.shape) loss = F.binary_cross_entropy( pred, zerolabel, reduction='none') * scale_factor.pow(beta) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = pred.size(1) pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1) pos_label = label[pos].long() # positives are supervised by bbox quality (IoU) score scale_factor = score[pos] - pred_sigmoid[pos, pos_label] loss[pos, pos_label] = F.binary_cross_entropy( pred[pos, pos_label], score[pos], reduction='none') * scale_factor.abs().pow(beta) loss = loss.sum(dim=1, keepdim=False) return loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def distribution_focal_loss(pred, label): r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection `_. Args: pred (torch.Tensor): Predicted general distribution of bounding boxes (before softmax) with shape (N, n+1), n is the max value of the integral set `{0, ..., n}` in paper. label (torch.Tensor): Target distance label for bounding boxes with shape (N,). Returns: torch.Tensor: Loss tensor with shape (N,). """ dis_left = label.long() dis_right = dis_left + 1 weight_left = dis_right.float() - label weight_right = label - dis_left.float() loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \ + F.cross_entropy(pred, dis_right, reduction='none') * weight_right return loss @LOSSES.register_module() class QualityFocalLoss(nn.Module): r"""Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection `_. Args: use_sigmoid (bool): Whether sigmoid operation is conducted in QFL. Defaults to True. beta (float): The beta parameter for calculating the modulating factor. Defaults to 2.0. reduction (str): Options are "none", "mean" and "sum". loss_weight (float): Loss weight of current loss. activated (bool, optional): Whether the input is activated. If True, it means the input has been activated and can be treated as probabilities. Else, it should be treated as logits. Defaults to False. """ def __init__(self, use_sigmoid=True, beta=2.0, reduction='mean', loss_weight=1.0, activated=False): super(QualityFocalLoss, self).__init__() assert use_sigmoid is True, 'Only sigmoid in QFL supported now.' self.use_sigmoid = use_sigmoid self.beta = beta self.reduction = reduction self.loss_weight = loss_weight self.activated = activated def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): Predicted joint representation of classification and quality (IoU) estimation with shape (N, C), C is the number of classes. target (tuple([torch.Tensor])): Target category label with shape (N,) and target quality label with shape (N,). weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. 
""" assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if self.use_sigmoid: if self.activated: calculate_loss_func = quality_focal_loss_with_prob else: calculate_loss_func = quality_focal_loss loss_cls = self.loss_weight * calculate_loss_func( pred, target, weight, beta=self.beta, reduction=reduction, avg_factor=avg_factor) else: raise NotImplementedError return loss_cls @LOSSES.register_module() class DistributionFocalLoss(nn.Module): r"""Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection `_. Args: reduction (str): Options are `'none'`, `'mean'` and `'sum'`. loss_weight (float): Loss weight of current loss. """ def __init__(self, reduction='mean', loss_weight=1.0): super(DistributionFocalLoss, self).__init__() self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): Predicted general distribution of bounding boxes (before softmax) with shape (N, n+1), n is the max value of the integral set `{0, ..., n}` in paper. target (torch.Tensor): Target distance label for bounding boxes with shape (N,). weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss_cls = self.loss_weight * distribution_focal_loss( pred, target, weight, reduction=reduction, avg_factor=avg_factor) return loss_cls ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/ghm_loss.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from ..builder import LOSSES from .utils import weight_reduce_loss def _expand_onehot_labels(labels, label_weights, label_channels): bin_labels = labels.new_full((labels.size(0), label_channels), 0) inds = torch.nonzero( (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze() if inds.numel() > 0: bin_labels[inds, labels[inds]] = 1 bin_label_weights = label_weights.view(-1, 1).expand( label_weights.size(0), label_channels) return bin_labels, bin_label_weights # TODO: code refactoring to make it consistent with other losses @LOSSES.register_module() class GHMC(nn.Module): """GHM Classification Loss. Details of the theorem can be viewed in the paper `Gradient Harmonized Single-stage Detector `_. Args: bins (int): Number of the unit regions for distribution calculation. momentum (float): The parameter for moving average. use_sigmoid (bool): Can only be true for BCE based loss now. loss_weight (float): The weight of the total GHM-C loss. reduction (str): Options are "none", "mean" and "sum". 
Defaults to "mean" """ def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0, reduction='mean'): super(GHMC, self).__init__() self.bins = bins self.momentum = momentum edges = torch.arange(bins + 1).float() / bins self.register_buffer('edges', edges) self.edges[-1] += 1e-6 if momentum > 0: acc_sum = torch.zeros(bins) self.register_buffer('acc_sum', acc_sum) self.use_sigmoid = use_sigmoid if not self.use_sigmoid: raise NotImplementedError self.loss_weight = loss_weight self.reduction = reduction def forward(self, pred, target, label_weight, reduction_override=None, **kwargs): """Calculate the GHM-C loss. Args: pred (float tensor of size [batch_num, class_num]): The direct prediction of classification fc layer. target (float tensor of size [batch_num, class_num]): Binary class target for each sample. label_weight (float tensor of size [batch_num, class_num]): the value is 1 if the sample is valid and 0 if ignored. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. Returns: The gradient harmonized loss. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) # the target should be binary class label if pred.dim() != target.dim(): target, label_weight = _expand_onehot_labels( target, label_weight, pred.size(-1)) target, label_weight = target.float(), label_weight.float() edges = self.edges mmt = self.momentum weights = torch.zeros_like(pred) # gradient length g = torch.abs(pred.sigmoid().detach() - target) valid = label_weight > 0 tot = max(valid.float().sum().item(), 1.0) n = 0 # n valid bins for i in range(self.bins): inds = (g >= edges[i]) & (g < edges[i + 1]) & valid num_in_bin = inds.sum().item() if num_in_bin > 0: if mmt > 0: self.acc_sum[i] = mmt * self.acc_sum[i] \ + (1 - mmt) * num_in_bin weights[inds] = tot / self.acc_sum[i] else: weights[inds] = tot / num_in_bin n += 1 if n > 0: weights = weights / n loss = F.binary_cross_entropy_with_logits( pred, target, reduction='none') loss = weight_reduce_loss( loss, weights, reduction=reduction, avg_factor=tot) return loss * self.loss_weight # TODO: code refactoring to make it consistent with other losses @LOSSES.register_module() class GHMR(nn.Module): """GHM Regression Loss. Details of the theorem can be viewed in the paper `Gradient Harmonized Single-stage Detector `_. Args: mu (float): The parameter for the Authentic Smooth L1 loss. bins (int): Number of the unit regions for distribution calculation. momentum (float): The parameter for moving average. loss_weight (float): The weight of the total GHM-R loss. reduction (str): Options are "none", "mean" and "sum". Defaults to "mean" """ def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0, reduction='mean'): super(GHMR, self).__init__() self.mu = mu self.bins = bins edges = torch.arange(bins + 1).float() / bins self.register_buffer('edges', edges) self.edges[-1] = 1e3 self.momentum = momentum if momentum > 0: acc_sum = torch.zeros(bins) self.register_buffer('acc_sum', acc_sum) self.loss_weight = loss_weight self.reduction = reduction # TODO: support reduction parameter def forward(self, pred, target, label_weight, avg_factor=None, reduction_override=None): """Calculate the GHM-R loss. Args: pred (float tensor of size [batch_num, 4 (* class_num)]): The prediction of box regression layer. Channel number can be 4 or 4 * class_num depending on whether it is class-agnostic. 
target (float tensor of size [batch_num, 4 (* class_num)]): The target regression values with the same size of pred. label_weight (float tensor of size [batch_num, 4 (* class_num)]): The weight of each sample, 0 if ignored. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. Returns: The gradient harmonized loss. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) mu = self.mu edges = self.edges mmt = self.momentum # ASL1 loss diff = pred - target loss = torch.sqrt(diff * diff + mu * mu) - mu # gradient length g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach() weights = torch.zeros_like(g) valid = label_weight > 0 tot = max(label_weight.float().sum().item(), 1.0) n = 0 # n: valid bins for i in range(self.bins): inds = (g >= edges[i]) & (g < edges[i + 1]) & valid num_in_bin = inds.sum().item() if num_in_bin > 0: n += 1 if mmt > 0: self.acc_sum[i] = mmt * self.acc_sum[i] \ + (1 - mmt) * num_in_bin weights[inds] = tot / self.acc_sum[i] else: weights[inds] = tot / num_in_bin if n > 0: weights /= n loss = weight_reduce_loss( loss, weights, reduction=reduction, avg_factor=tot) return loss * self.loss_weight ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/iou_loss.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import math import warnings import mmcv import torch import torch.nn as nn from mmdet.core import bbox_overlaps from ..builder import LOSSES from .utils import weighted_loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def iou_loss(pred, target, linear=False, mode='log', eps=1e-6): """IoU loss. Computing the IoU loss between a set of predicted bboxes and target bboxes. The loss is calculated as negative log of IoU. Args: pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (torch.Tensor): Corresponding gt bboxes, shape (n, 4). linear (bool, optional): If True, use linear scale of loss instead of log scale. Default: False. mode (str): Loss scaling mode, including "linear", "square", and "log". Default: 'log' eps (float): Eps to avoid log(0). Return: torch.Tensor: Loss tensor. """ assert mode in ['linear', 'square', 'log'] if linear: mode = 'linear' warnings.warn('DeprecationWarning: Setting "linear=True" in ' 'iou_loss is deprecated, please use "mode=`linear`" ' 'instead.') ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps) if mode == 'linear': loss = 1 - ious elif mode == 'square': loss = 1 - ious**2 elif mode == 'log': loss = -ious.log() else: raise NotImplementedError return loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3): """BIoULoss. This is an implementation of paper `Improving Object Localization with Fitness NMS and Bounded IoU Loss. `_. Args: pred (torch.Tensor): Predicted bboxes. target (torch.Tensor): Target bboxes. beta (float): beta parameter in smoothl1. eps (float): eps to avoid NaN. 
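# ----------------------------------------------------------------------
# A small numeric illustration of the three scaling modes in `iou_loss`
# above. For the same IoU, 'linear' gives 1 - IoU, 'square' gives
# 1 - IoU**2, and 'log' gives -log(IoU); the log form grows much faster
# as boxes drift apart. Plain tensor math, no mmdet dependency.
import torch

ious = torch.tensor([0.9, 0.5, 0.1]).clamp(min=1e-6)
print(1 - ious)          # linear: 0.10, 0.50, 0.90
print(1 - ious**2)       # square: 0.19, 0.75, 0.99
print(-ious.log())       # log:    0.105, 0.693, 2.303
# ----------------------------------------------------------------------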
""" pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5 pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5 pred_w = pred[:, 2] - pred[:, 0] pred_h = pred[:, 3] - pred[:, 1] with torch.no_grad(): target_ctrx = (target[:, 0] + target[:, 2]) * 0.5 target_ctry = (target[:, 1] + target[:, 3]) * 0.5 target_w = target[:, 2] - target[:, 0] target_h = target[:, 3] - target[:, 1] dx = target_ctrx - pred_ctrx dy = target_ctry - pred_ctry loss_dx = 1 - torch.max( (target_w - 2 * dx.abs()) / (target_w + 2 * dx.abs() + eps), torch.zeros_like(dx)) loss_dy = 1 - torch.max( (target_h - 2 * dy.abs()) / (target_h + 2 * dy.abs() + eps), torch.zeros_like(dy)) loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w / (target_w + eps)) loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h / (target_h + eps)) # view(..., -1) does not work for empty tensor loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh], dim=-1).flatten(1) loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta, loss_comb - 0.5 * beta) return loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def giou_loss(pred, target, eps=1e-7): r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding Box Regression `_. Args: pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (torch.Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Eps to avoid log(0). Return: Tensor: Loss tensor. """ gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps) loss = 1 - gious return loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def diou_loss(pred, target, eps=1e-7): r"""`Implementation of Distance-IoU Loss: Faster and Better Learning for Bounding Box Regression, https://arxiv.org/abs/1911.08287`_. Code is modified from https://github.com/Zzh-tju/DIoU. Args: pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Eps to avoid log(0). Return: Tensor: Loss tensor. """ # overlap lt = torch.max(pred[:, :2], target[:, :2]) rb = torch.min(pred[:, 2:], target[:, 2:]) wh = (rb - lt).clamp(min=0) overlap = wh[:, 0] * wh[:, 1] # union ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1]) ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1]) union = ap + ag - overlap + eps # IoU ious = overlap / union # enclose area enclose_x1y1 = torch.min(pred[:, :2], target[:, :2]) enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:]) enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0) cw = enclose_wh[:, 0] ch = enclose_wh[:, 1] c2 = cw**2 + ch**2 + eps b1_x1, b1_y1 = pred[:, 0], pred[:, 1] b1_x2, b1_y2 = pred[:, 2], pred[:, 3] b2_x1, b2_y1 = target[:, 0], target[:, 1] b2_x2, b2_y2 = target[:, 2], target[:, 3] left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4 right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4 rho2 = left + right # DIoU dious = ious - rho2 / c2 loss = 1 - dious return loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def ciou_loss(pred, target, eps=1e-7): r"""`Implementation of paper `Enhancing Geometric Factors into Model Learning and Inference for Object Detection and Instance Segmentation `_. Code is modified from https://github.com/Zzh-tju/CIoU. Args: pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Eps to avoid log(0). Return: Tensor: Loss tensor. 
""" # overlap lt = torch.max(pred[:, :2], target[:, :2]) rb = torch.min(pred[:, 2:], target[:, 2:]) wh = (rb - lt).clamp(min=0) overlap = wh[:, 0] * wh[:, 1] # union ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1]) ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1]) union = ap + ag - overlap + eps # IoU ious = overlap / union # enclose area enclose_x1y1 = torch.min(pred[:, :2], target[:, :2]) enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:]) enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0) cw = enclose_wh[:, 0] ch = enclose_wh[:, 1] c2 = cw**2 + ch**2 + eps b1_x1, b1_y1 = pred[:, 0], pred[:, 1] b1_x2, b1_y2 = pred[:, 2], pred[:, 3] b2_x1, b2_y1 = target[:, 0], target[:, 1] b2_x2, b2_y2 = target[:, 2], target[:, 3] w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4 right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4 rho2 = left + right factor = 4 / math.pi**2 v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) with torch.no_grad(): alpha = (ious > 0.5).float() * v / (1 - ious + v) # CIoU cious = ious - (rho2 / c2 + alpha * v) loss = 1 - cious.clamp(min=-1.0, max=1.0) return loss @LOSSES.register_module() class IoULoss(nn.Module): """IoULoss. Computing the IoU loss between a set of predicted bboxes and target bboxes. Args: linear (bool): If True, use linear scale of loss else determined by mode. Default: False. eps (float): Eps to avoid log(0). reduction (str): Options are "none", "mean" and "sum". loss_weight (float): Weight of loss. mode (str): Loss scaling mode, including "linear", "square", and "log". Default: 'log' """ def __init__(self, linear=False, eps=1e-6, reduction='mean', loss_weight=1.0, mode='log'): super(IoULoss, self).__init__() assert mode in ['linear', 'square', 'log'] if linear: mode = 'linear' warnings.warn('DeprecationWarning: Setting "linear=True" in ' 'IOULoss is deprecated, please use "mode=`linear`" ' 'instead.') self.mode = mode self.linear = linear self.eps = eps self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. Options are "none", "mean" and "sum". 
""" assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if (weight is not None) and (not torch.any(weight > 0)) and ( reduction != 'none'): if pred.dim() == weight.dim() + 1: weight = weight.unsqueeze(1) return (pred * weight).sum() # 0 if weight is not None and weight.dim() > 1: # TODO: remove this in the future # reduce the weight of shape (n, 4) to (n,) to match the # iou_loss of shape (n,) assert weight.shape == pred.shape weight = weight.mean(-1) loss = self.loss_weight * iou_loss( pred, target, weight, mode=self.mode, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss @LOSSES.register_module() class BoundedIoULoss(nn.Module): def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1.0): super(BoundedIoULoss, self).__init__() self.beta = beta self.eps = eps self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): if weight is not None and not torch.any(weight > 0): if pred.dim() == weight.dim() + 1: weight = weight.unsqueeze(1) return (pred * weight).sum() # 0 assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss = self.loss_weight * bounded_iou_loss( pred, target, weight, beta=self.beta, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss @LOSSES.register_module() class GIoULoss(nn.Module): def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): super(GIoULoss, self).__init__() self.eps = eps self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): if weight is not None and not torch.any(weight > 0): if pred.dim() == weight.dim() + 1: weight = weight.unsqueeze(1) return (pred * weight).sum() # 0 assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if weight is not None and weight.dim() > 1: # TODO: remove this in the future # reduce the weight of shape (n, 4) to (n,) to match the # giou_loss of shape (n,) assert weight.shape == pred.shape weight = weight.mean(-1) loss = self.loss_weight * giou_loss( pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss @LOSSES.register_module() class DIoULoss(nn.Module): def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): super(DIoULoss, self).__init__() self.eps = eps self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): if weight is not None and not torch.any(weight > 0): if pred.dim() == weight.dim() + 1: weight = weight.unsqueeze(1) return (pred * weight).sum() # 0 assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if weight is not None and weight.dim() > 1: # TODO: remove this in the future # reduce the weight of shape (n, 4) to (n,) to match the # giou_loss of shape (n,) assert weight.shape == pred.shape weight = weight.mean(-1) loss = self.loss_weight * diou_loss( pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss @LOSSES.register_module() class CIoULoss(nn.Module): def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): 
        super(CIoULoss, self).__init__()
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        if weight is not None and not torch.any(weight > 0):
            if pred.dim() == weight.dim() + 1:
                weight = weight.unsqueeze(1)
            return (pred * weight).sum()  # 0
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if weight is not None and weight.dim() > 1:
            # TODO: remove this in the future
            # reduce the weight of shape (n, 4) to (n,) to match the
            # ciou_loss of shape (n,)
            assert weight.shape == pred.shape
            weight = weight.mean(-1)
        loss = self.loss_weight * ciou_loss(
            pred,
            target,
            weight,
            eps=self.eps,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/kd_loss.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
import torch.nn.functional as F

from ..builder import LOSSES
from .utils import weighted_loss


@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def knowledge_distillation_kl_div_loss(pred,
                                       soft_label,
                                       T,
                                       detach_target=True):
    r"""Loss function for knowledge distilling using KL divergence.

    Args:
        pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, n + 1).
        T (int): Temperature for distillation.
        detach_target (bool): Remove soft_label from automatic
            differentiation. Default: True.

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    assert pred.size() == soft_label.size()
    target = F.softmax(soft_label / T, dim=1)
    if detach_target:
        target = target.detach()

    kd_loss = F.kl_div(
        F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (
            T * T)

    return kd_loss


@LOSSES.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
    """Loss function for knowledge distilling using KL divergence.

    Args:
        reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
        loss_weight (float): Loss weight of current loss.
        T (int): Temperature for distillation.
    """

    def __init__(self, reduction='mean', loss_weight=1.0, T=10):
        super(KnowledgeDistillationKLDivLoss, self).__init__()
        assert T >= 1
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.T = T

    def forward(self,
                pred,
                soft_label,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, n + 1).
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)

        loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(
            pred,
            soft_label,
            weight,
            reduction=reduction,
            avg_factor=avg_factor,
            T=self.T)

        return loss_kd


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/mse_loss.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
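# Usage sketch (illustrative): like the other losses in this package,
# MSELoss below wraps an element-wise loss with the shared weight /
# reduction / avg_factor handling from .utils, e.g.
#   >>> import torch
#   >>> loss = MSELoss()
#   >>> loss(torch.zeros(2, 4), torch.ones(2, 4))
#   tensor(1.)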
import torch.nn as nn import torch.nn.functional as F from ..builder import LOSSES from .utils import weighted_loss @weighted_loss def mse_loss(pred, target): """Wrapper of mse loss.""" return F.mse_loss(pred, target, reduction='none') @LOSSES.register_module() class MSELoss(nn.Module): """MSELoss. Args: reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of the loss. Defaults to 1.0 """ def __init__(self, reduction='mean', loss_weight=1.0): super().__init__() self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function of loss. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): Weight of the loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss = self.loss_weight * mse_loss( pred, target, weight, reduction=reduction, avg_factor=avg_factor) return loss ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/pisa_loss.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from mmdet.core import bbox_overlaps @mmcv.jit(derivate=True, coderize=True) def isr_p(cls_score, bbox_pred, bbox_targets, rois, sampling_results, loss_cls, bbox_coder, k=2, bias=0, num_class=80): """Importance-based Sample Reweighting (ISR_P), positive part. Args: cls_score (Tensor): Predicted classification scores. bbox_pred (Tensor): Predicted bbox deltas. bbox_targets (tuple[Tensor]): A tuple of bbox targets, the are labels, label_weights, bbox_targets, bbox_weights, respectively. rois (Tensor): Anchors (single_stage) in shape (n, 4) or RoIs (two_stage) in shape (n, 5). sampling_results (obj): Sampling results. loss_cls (func): Classification loss func of the head. bbox_coder (obj): BBox coder of the head. k (float): Power of the non-linear mapping. bias (float): Shift of the non-linear mapping. num_class (int): Number of classes, default: 80. 
Return: tuple([Tensor]): labels, imp_based_label_weights, bbox_targets, bbox_target_weights """ labels, label_weights, bbox_targets, bbox_weights = bbox_targets pos_label_inds = ((labels >= 0) & (labels < num_class)).nonzero().reshape(-1) pos_labels = labels[pos_label_inds] # if no positive samples, return the original targets num_pos = float(pos_label_inds.size(0)) if num_pos == 0: return labels, label_weights, bbox_targets, bbox_weights # merge pos_assigned_gt_inds of per image to a single tensor gts = list() last_max_gt = 0 for i in range(len(sampling_results)): gt_i = sampling_results[i].pos_assigned_gt_inds gts.append(gt_i + last_max_gt) if len(gt_i) != 0: last_max_gt = gt_i.max() + 1 gts = torch.cat(gts) assert len(gts) == num_pos cls_score = cls_score.detach() bbox_pred = bbox_pred.detach() # For single stage detectors, rois here indicate anchors, in shape (N, 4) # For two stage detectors, rois are in shape (N, 5) if rois.size(-1) == 5: pos_rois = rois[pos_label_inds][:, 1:] else: pos_rois = rois[pos_label_inds] if bbox_pred.size(-1) > 4: bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4) pos_delta_pred = bbox_pred[pos_label_inds, pos_labels].view(-1, 4) else: pos_delta_pred = bbox_pred[pos_label_inds].view(-1, 4) # compute iou of the predicted bbox and the corresponding GT pos_delta_target = bbox_targets[pos_label_inds].view(-1, 4) pos_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_pred) target_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_target) ious = bbox_overlaps(pos_bbox_pred, target_bbox_pred, is_aligned=True) pos_imp_weights = label_weights[pos_label_inds] # Two steps to compute IoU-HLR. Samples are first sorted by IoU locally, # then sorted again within the same-rank group max_l_num = pos_labels.bincount().max() for label in pos_labels.unique(): l_inds = (pos_labels == label).nonzero().view(-1) l_gts = gts[l_inds] for t in l_gts.unique(): t_inds = l_inds[l_gts == t] t_ious = ious[t_inds] _, t_iou_rank_idx = t_ious.sort(descending=True) _, t_iou_rank = t_iou_rank_idx.sort() ious[t_inds] += max_l_num - t_iou_rank.float() l_ious = ious[l_inds] _, l_iou_rank_idx = l_ious.sort(descending=True) _, l_iou_rank = l_iou_rank_idx.sort() # IoU-HLR # linearly map HLR to label weights pos_imp_weights[l_inds] *= (max_l_num - l_iou_rank.float()) / max_l_num pos_imp_weights = (bias + pos_imp_weights * (1 - bias)).pow(k) # normalize to make the new weighted loss value equal to the original loss pos_loss_cls = loss_cls( cls_score[pos_label_inds], pos_labels, reduction_override='none') if pos_loss_cls.dim() > 1: ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds][:, None] new_pos_loss_cls = pos_loss_cls * pos_imp_weights[:, None] else: ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds] new_pos_loss_cls = pos_loss_cls * pos_imp_weights pos_loss_cls_ratio = ori_pos_loss_cls.sum() / new_pos_loss_cls.sum() pos_imp_weights = pos_imp_weights * pos_loss_cls_ratio label_weights[pos_label_inds] = pos_imp_weights bbox_targets = labels, label_weights, bbox_targets, bbox_weights return bbox_targets @mmcv.jit(derivate=True, coderize=True) def carl_loss(cls_score, labels, bbox_pred, bbox_targets, loss_bbox, k=1, bias=0.2, avg_factor=None, sigmoid=False, num_class=80): """Classification-Aware Regression Loss (CARL). Args: cls_score (Tensor): Predicted classification scores. labels (Tensor): Targets of classification. bbox_pred (Tensor): Predicted bbox deltas. bbox_targets (Tensor): Target of bbox regression. loss_bbox (func): Regression loss func of the head. 
        k (float): Power of the non-linear mapping.
        bias (float): Shift of the non-linear mapping.
        avg_factor (int): Average factor used in regression loss.
        sigmoid (bool): Activation of the classification score.
        num_class (int): Number of classes, default: 80.

    Return:
        dict: CARL loss dict.
    """
    pos_label_inds = ((labels >= 0) &
                      (labels < num_class)).nonzero().reshape(-1)
    if pos_label_inds.numel() == 0:
        return dict(loss_carl=cls_score.sum()[None] * 0.)
    pos_labels = labels[pos_label_inds]

    # multiply pos_cls_score with the corresponding bbox weight
    # and remain gradient
    if sigmoid:
        pos_cls_score = cls_score.sigmoid()[pos_label_inds, pos_labels]
    else:
        pos_cls_score = cls_score.softmax(-1)[pos_label_inds, pos_labels]
    carl_loss_weights = (bias + (1 - bias) * pos_cls_score).pow(k)

    # normalize carl_loss_weight to make its sum equal to num positive
    num_pos = float(pos_cls_score.size(0))
    weight_ratio = num_pos / carl_loss_weights.sum()
    carl_loss_weights *= weight_ratio

    if avg_factor is None:
        avg_factor = bbox_targets.size(0)
    # if is class agnostic, bbox pred is in shape (N, 4)
    # otherwise, bbox pred is in shape (N, #classes, 4)
    if bbox_pred.size(-1) > 4:
        bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)
        pos_bbox_preds = bbox_pred[pos_label_inds, pos_labels]
    else:
        pos_bbox_preds = bbox_pred[pos_label_inds]
    ori_loss_reg = loss_bbox(
        pos_bbox_preds,
        bbox_targets[pos_label_inds],
        reduction_override='none') / avg_factor
    loss_carl = (ori_loss_reg * carl_loss_weights[:, None]).sum()
    return dict(loss_carl=loss_carl[None])


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/seesaw_loss.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F

from ..builder import LOSSES
from .accuracy import accuracy
from .cross_entropy_loss import cross_entropy
from .utils import weight_reduce_loss


def seesaw_ce_loss(cls_score,
                   labels,
                   label_weights,
                   cum_samples,
                   num_classes,
                   p,
                   q,
                   eps,
                   reduction='mean',
                   avg_factor=None):
    """Calculate the Seesaw CrossEntropy loss.

    Args:
        cls_score (torch.Tensor): The prediction with shape (N, C),
            C is the number of classes.
        labels (torch.Tensor): The learning label of the prediction.
        label_weights (torch.Tensor): Sample-wise loss weight.
        cum_samples (torch.Tensor): Cumulative samples for each category.
        num_classes (int): The number of classes.
        p (float): The ``p`` in the mitigation factor.
        q (float): The ``q`` in the compensation factor.
        eps (float): The minimal value of divisor to smooth
            the computation of compensation factor.
        reduction (str, optional): The method used to reduce the loss.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    Returns:
        torch.Tensor: The calculated loss
    """
    assert cls_score.size(-1) == num_classes
    assert len(cum_samples) == num_classes

    onehot_labels = F.one_hot(labels, num_classes)
    seesaw_weights = cls_score.new_ones(onehot_labels.size())

    # mitigation factor
    if p > 0:
        sample_ratio_matrix = cum_samples[None, :].clamp(
            min=1) / cum_samples[:, None].clamp(min=1)
        index = (sample_ratio_matrix < 1.0).float()
        sample_weights = sample_ratio_matrix.pow(p) * index + (1 - index)
        mitigation_factor = sample_weights[labels.long(), :]
        seesaw_weights = seesaw_weights * mitigation_factor

    # compensation factor
    if q > 0:
        scores = F.softmax(cls_score.detach(), dim=1)
        self_scores = scores[
            torch.arange(0, len(scores)).to(scores.device).long(),
            labels.long()]
        score_matrix = scores / self_scores[:, None].clamp(min=eps)
        index = (score_matrix > 1.0).float()
        compensation_factor = score_matrix.pow(q) * index + (1 - index)
        seesaw_weights = seesaw_weights * compensation_factor

    cls_score = cls_score + (seesaw_weights.log() * (1 - onehot_labels))

    loss = F.cross_entropy(cls_score, labels, weight=None, reduction='none')

    if label_weights is not None:
        label_weights = label_weights.float()
    loss = weight_reduce_loss(
        loss, weight=label_weights, reduction=reduction,
        avg_factor=avg_factor)
    return loss


@LOSSES.register_module()
class SeesawLoss(nn.Module):
    """
    Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021)
    arXiv: https://arxiv.org/abs/2008.10032

    Args:
        use_sigmoid (bool, optional): Whether the prediction uses sigmoid
            instead of softmax. Only False is supported.
        p (float, optional): The ``p`` in the mitigation factor.
            Defaults to 0.8.
        q (float, optional): The ``q`` in the compensation factor.
            Defaults to 2.0.
        num_classes (int, optional): The number of classes.
            Defaults to 1203 for the LVIS v1 dataset.
        eps (float, optional): The minimal value of divisor to smooth
            the computation of compensation factor.
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of the loss.
            Defaults to 1.0.
        return_dict (bool, optional): Whether to return the losses as a dict.
            Defaults to True.
    """

    def __init__(self,
                 use_sigmoid=False,
                 p=0.8,
                 q=2.0,
                 num_classes=1203,
                 eps=1e-2,
                 reduction='mean',
                 loss_weight=1.0,
                 return_dict=True):
        super(SeesawLoss, self).__init__()
        assert not use_sigmoid
        self.use_sigmoid = False
        self.p = p
        self.q = q
        self.num_classes = num_classes
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.return_dict = return_dict

        # 0 for pos, 1 for neg
        self.cls_criterion = seesaw_ce_loss

        # cumulative samples for each category
        self.register_buffer(
            'cum_samples',
            torch.zeros(self.num_classes + 1, dtype=torch.float))

        # custom output channels of the classifier
        self.custom_cls_channels = True
        # custom activation of cls_score
        self.custom_activation = True
        # custom accuracy of the classifier
        self.custom_accuracy = True

    def _split_cls_score(self, cls_score):
        # split cls_score to cls_score_classes and cls_score_objectness
        assert cls_score.size(-1) == self.num_classes + 2
        cls_score_classes = cls_score[..., :-2]
        cls_score_objectness = cls_score[..., -2:]
        return cls_score_classes, cls_score_objectness

    def get_cls_channels(self, num_classes):
        """Get custom classification channels.

        Args:
            num_classes (int): The number of classes.

        Returns:
            int: The custom classification channels.
        """
        assert num_classes == self.num_classes
        return num_classes + 2

    def get_activation(self, cls_score):
        """Get custom activation of cls_score.
Args: cls_score (torch.Tensor): The prediction with shape (N, C + 2). Returns: torch.Tensor: The custom activation of cls_score with shape (N, C + 1). """ cls_score_classes, cls_score_objectness = self._split_cls_score( cls_score) score_classes = F.softmax(cls_score_classes, dim=-1) score_objectness = F.softmax(cls_score_objectness, dim=-1) score_pos = score_objectness[..., [0]] score_neg = score_objectness[..., [1]] score_classes = score_classes * score_pos scores = torch.cat([score_classes, score_neg], dim=-1) return scores def get_accuracy(self, cls_score, labels): """Get custom accuracy w.r.t. cls_score and labels. Args: cls_score (torch.Tensor): The prediction with shape (N, C + 2). labels (torch.Tensor): The learning label of the prediction. Returns: Dict [str, torch.Tensor]: The accuracy for objectness and classes, respectively. """ pos_inds = labels < self.num_classes obj_labels = (labels == self.num_classes).long() cls_score_classes, cls_score_objectness = self._split_cls_score( cls_score) acc_objectness = accuracy(cls_score_objectness, obj_labels) acc_classes = accuracy(cls_score_classes[pos_inds], labels[pos_inds]) acc = dict() acc['acc_objectness'] = acc_objectness acc['acc_classes'] = acc_classes return acc def forward(self, cls_score, labels, label_weights=None, avg_factor=None, reduction_override=None): """Forward function. Args: cls_score (torch.Tensor): The prediction with shape (N, C + 2). labels (torch.Tensor): The learning label of the prediction. label_weights (torch.Tensor, optional): Sample-wise loss weight. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction (str, optional): The method used to reduce the loss. Options are "none", "mean" and "sum". Returns: torch.Tensor | Dict [str, torch.Tensor]: if return_dict == False: The calculated loss | if return_dict == True: The dict of calculated losses for objectness and classes, respectively. 
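
        Example (an illustrative sketch; the shapes and labels are
        assumptions for demonstration, with ``num_classes + 2`` score
        channels and the last class index marking background):
            >>> import torch
            >>> self = SeesawLoss(num_classes=4)
            >>> cls_score = torch.randn(3, 6)
            >>> labels = torch.tensor([0, 2, 4])  # 4 is background
            >>> losses = self(cls_score, labels)
            >>> sorted(losses.keys())
            ['loss_cls_classes', 'loss_cls_objectness']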
""" assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) assert cls_score.size(-1) == self.num_classes + 2 pos_inds = labels < self.num_classes # 0 for pos, 1 for neg obj_labels = (labels == self.num_classes).long() # accumulate the samples for each category unique_labels = labels.unique() for u_l in unique_labels: inds_ = labels == u_l.item() self.cum_samples[u_l] += inds_.sum() if label_weights is not None: label_weights = label_weights.float() else: label_weights = labels.new_ones(labels.size(), dtype=torch.float) cls_score_classes, cls_score_objectness = self._split_cls_score( cls_score) # calculate loss_cls_classes (only need pos samples) if pos_inds.sum() > 0: loss_cls_classes = self.loss_weight * self.cls_criterion( cls_score_classes[pos_inds], labels[pos_inds], label_weights[pos_inds], self.cum_samples[:self.num_classes], self.num_classes, self.p, self.q, self.eps, reduction, avg_factor) else: loss_cls_classes = cls_score_classes[pos_inds].sum() # calculate loss_cls_objectness loss_cls_objectness = self.loss_weight * cross_entropy( cls_score_objectness, obj_labels, label_weights, reduction, avg_factor) if self.return_dict: loss_cls = dict() loss_cls['loss_cls_objectness'] = loss_cls_objectness loss_cls['loss_cls_classes'] = loss_cls_classes else: loss_cls = loss_cls_classes + loss_cls_objectness return loss_cls ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/smooth_l1_loss.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch import torch.nn as nn from ..builder import LOSSES from .utils import weighted_loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def smooth_l1_loss(pred, target, beta=1.0): """Smooth L1 loss. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. beta (float, optional): The threshold in the piecewise function. Defaults to 1.0. Returns: torch.Tensor: Calculated loss """ assert beta > 0 if target.numel() == 0: return pred.sum() * 0 assert pred.size() == target.size() diff = torch.abs(pred - target) loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta) return loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def l1_loss(pred, target): """L1 loss. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. Returns: torch.Tensor: Calculated loss """ if target.numel() == 0: return pred.sum() * 0 assert pred.size() == target.size() loss = torch.abs(pred - target) return loss @LOSSES.register_module() class SmoothL1Loss(nn.Module): """Smooth L1 loss. Args: beta (float, optional): The threshold in the piecewise function. Defaults to 1.0. reduction (str, optional): The method to reduce the loss. Options are "none", "mean" and "sum". Defaults to "mean". loss_weight (float, optional): The weight of loss. """ def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0): super(SmoothL1Loss, self).__init__() self.beta = beta self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. 
avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss_bbox = self.loss_weight * smooth_l1_loss( pred, target, weight, beta=self.beta, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss_bbox @LOSSES.register_module() class L1Loss(nn.Module): """L1 loss. Args: reduction (str, optional): The method to reduce the loss. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of loss. """ def __init__(self, reduction='mean', loss_weight=1.0): super(L1Loss, self).__init__() self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss_bbox = self.loss_weight * l1_loss( pred, target, weight, reduction=reduction, avg_factor=avg_factor) return loss_bbox ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/utils.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import functools import mmcv import torch import torch.nn.functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) # none: 0, elementwise_mean:1, sum: 2 if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() @mmcv.jit(derivate=True, coderize=True) def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ # if weight is specified, apply element-wise weight if weight is not None: loss = loss * weight # if avg_factor is not specified, just reduce the loss if avg_factor is None: loss = reduce_loss(loss, reduction) else: # if reduction is mean, then average the loss by avg_factor if reduction == 'mean': # Avoid causing ZeroDivisionError when avg_factor is 0.0, # i.e., all labels of an image belong to ignore index. eps = torch.finfo(torch.float32).eps loss = loss.sum() / (avg_factor + eps) # if reduction is 'none', then do nothing, otherwise raise an error elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. 
To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs): # get element-wise loss loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/varifocal_loss.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch.nn as nn import torch.nn.functional as F from ..builder import LOSSES from .utils import weight_reduce_loss @mmcv.jit(derivate=True, coderize=True) def varifocal_loss(pred, target, weight=None, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', avg_factor=None): """`Varifocal Loss `_ Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes target (torch.Tensor): The learning target of the iou-aware classification score with shape (N, C), C is the number of classes. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive example with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. """ # pred and target should be of the same size assert pred.size() == target.size() pred_sigmoid = pred.sigmoid() target = target.type_as(pred) if iou_weighted: focal_weight = target * (target > 0.0).float() + \ alpha * (pred_sigmoid - target).abs().pow(gamma) * \ (target <= 0.0).float() else: focal_weight = (target > 0.0).float() + \ alpha * (pred_sigmoid - target).abs().pow(gamma) * \ (target <= 0.0).float() loss = F.binary_cross_entropy_with_logits( pred, target, reduction='none') * focal_weight loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss @LOSSES.register_module() class VarifocalLoss(nn.Module): def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', loss_weight=1.0): """`Varifocal Loss `_ Args: use_sigmoid (bool, optional): Whether the prediction is used for sigmoid or softmax. Defaults to True. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. 
Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive examples with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". loss_weight (float, optional): Weight of loss. Defaults to 1.0. """ super(VarifocalLoss, self).__init__() assert use_sigmoid is True, \ 'Only sigmoid varifocal loss supported now.' assert alpha >= 0.0 self.use_sigmoid = use_sigmoid self.alpha = alpha self.gamma = gamma self.iou_weighted = iou_weighted self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Options are "none", "mean" and "sum". Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if self.use_sigmoid: loss_cls = self.loss_weight * varifocal_loss( pred, target, weight, alpha=self.alpha, gamma=self.gamma, iou_weighted=self.iou_weighted, reduction=reduction, avg_factor=avg_factor) else: raise NotImplementedError return loss_cls ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .bfp import BFP from .channel_mapper import ChannelMapper from .ct_resnet_neck import CTResNetNeck from .dilated_encoder import DilatedEncoder from .dyhead import DyHead from .fpg import FPG from .fpn import FPN from .fpn_carafe import FPN_CARAFE from .hrfpn import HRFPN from .nas_fpn import NASFPN from .nasfcos_fpn import NASFCOS_FPN from .pafpn import PAFPN from .rfp import RFP from .ssd_neck import SSDNeck from .yolo_neck import YOLOV3Neck from .yolox_pafpn import YOLOXPAFPN __all__ = [ 'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN', 'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder', 'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN', 'DyHead' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/bfp.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.cnn.bricks import NonLocal2d from mmcv.runner import BaseModule from ..builder import NECKS @NECKS.register_module() class BFP(BaseModule): """BFP (Balanced Feature Pyramids) BFP takes multi-level features as inputs and gather them into a single one, then refine the gathered feature and scatter the refined results to multi-level features. This module is used in Libra R-CNN (CVPR 2019), see the paper `Libra R-CNN: Towards Balanced Learning for Object Detection `_ for details. Args: in_channels (int): Number of input channels (feature maps of all levels should have the same channels). num_levels (int): Number of input feature levels. 
conv_cfg (dict): The config dict for convolution layers. norm_cfg (dict): The config dict for normalization layers. refine_level (int): Index of integration and refine level of BSF in multi-level features from bottom to top. refine_type (str): Type of the refine op, currently support [None, 'conv', 'non_local']. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, in_channels, num_levels, refine_level=2, refine_type=None, conv_cfg=None, norm_cfg=None, init_cfg=dict( type='Xavier', layer='Conv2d', distribution='uniform')): super(BFP, self).__init__(init_cfg) assert refine_type in [None, 'conv', 'non_local'] self.in_channels = in_channels self.num_levels = num_levels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.refine_level = refine_level self.refine_type = refine_type assert 0 <= self.refine_level < self.num_levels if self.refine_type == 'conv': self.refine = ConvModule( self.in_channels, self.in_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) elif self.refine_type == 'non_local': self.refine = NonLocal2d( self.in_channels, reduction=1, use_scale=False, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) def forward(self, inputs): """Forward function.""" assert len(inputs) == self.num_levels # step 1: gather multi-level features by resize and average feats = [] gather_size = inputs[self.refine_level].size()[2:] for i in range(self.num_levels): if i < self.refine_level: gathered = F.adaptive_max_pool2d( inputs[i], output_size=gather_size) else: gathered = F.interpolate( inputs[i], size=gather_size, mode='nearest') feats.append(gathered) bsf = sum(feats) / len(feats) # step 2: refine gathered features if self.refine_type is not None: bsf = self.refine(bsf) # step 3: scatter refined features to multi-levels by a residual path outs = [] for i in range(self.num_levels): out_size = inputs[i].size()[2:] if i < self.refine_level: residual = F.interpolate(bsf, size=out_size, mode='nearest') else: residual = F.adaptive_max_pool2d(bsf, output_size=out_size) outs.append(residual + inputs[i]) return tuple(outs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/channel_mapper.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from ..builder import NECKS @NECKS.register_module() class ChannelMapper(BaseModule): r"""Channel Mapper to reduce/increase channels of backbone features. This is used to reduce/increase channels of backbone features. Args: in_channels (List[int]): Number of input channels per scale. out_channels (int): Number of output channels (used at each scale). kernel_size (int, optional): kernel_size for reducing channels (used at each scale). Default: 3. conv_cfg (dict, optional): Config dict for convolution layer. Default: None. norm_cfg (dict, optional): Config dict for normalization layer. Default: None. act_cfg (dict, optional): Config dict for activation layer in ConvModule. Default: dict(type='ReLU'). num_outs (int, optional): Number of output feature maps. There would be extra_convs when num_outs larger than the length of in_channels. init_cfg (dict or list[dict], optional): Initialization config dict. Example: >>> import torch >>> in_channels = [2, 3, 5, 7] >>> scales = [340, 170, 84, 43] >>> inputs = [torch.rand(1, c, s, s) ... 
for c, s in zip(in_channels, scales)] >>> self = ChannelMapper(in_channels, 11, 3).eval() >>> outputs = self.forward(inputs) >>> for i in range(len(outputs)): ... print(f'outputs[{i}].shape = {outputs[i].shape}') outputs[0].shape = torch.Size([1, 11, 340, 340]) outputs[1].shape = torch.Size([1, 11, 170, 170]) outputs[2].shape = torch.Size([1, 11, 84, 84]) outputs[3].shape = torch.Size([1, 11, 43, 43]) """ def __init__(self, in_channels, out_channels, kernel_size=3, conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'), num_outs=None, init_cfg=dict( type='Xavier', layer='Conv2d', distribution='uniform')): super(ChannelMapper, self).__init__(init_cfg) assert isinstance(in_channels, list) self.extra_convs = None if num_outs is None: num_outs = len(in_channels) self.convs = nn.ModuleList() for in_channel in in_channels: self.convs.append( ConvModule( in_channel, out_channels, kernel_size, padding=(kernel_size - 1) // 2, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)) if num_outs > len(in_channels): self.extra_convs = nn.ModuleList() for i in range(len(in_channels), num_outs): if i == len(in_channels): in_channel = in_channels[-1] else: in_channel = out_channels self.extra_convs.append( ConvModule( in_channel, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)) def forward(self, inputs): """Forward function.""" assert len(inputs) == len(self.convs) outs = [self.convs[i](inputs[i]) for i in range(len(inputs))] if self.extra_convs: for i in range(len(self.extra_convs)): if i == 0: outs.append(self.extra_convs[0](inputs[-1])) else: outs.append(self.extra_convs[i](outs[-1])) return tuple(outs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/ct_resnet_neck.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import math import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, auto_fp16 from mmdet.models.builder import NECKS @NECKS.register_module() class CTResNetNeck(BaseModule): """The neck used in `CenterNet `_ for object classification and box regression. Args: in_channel (int): Number of input channels. num_deconv_filters (tuple[int]): Number of filters per stage. num_deconv_kernels (tuple[int]): Number of kernels per stage. use_dcn (bool): If True, use DCNv2. Default: True. init_cfg (dict or list[dict], optional): Initialization config dict. 
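
    Example (an illustrative sketch; the channel and size values are
    assumptions, and ``use_dcn=False`` avoids needing the DCNv2 op):
        >>> import torch
        >>> self = CTResNetNeck(in_channel=16,
        ...                     num_deconv_filters=(8, 4),
        ...                     num_deconv_kernels=(4, 4),
        ...                     use_dcn=False)
        >>> inputs = (torch.rand(1, 16, 16, 16), )
        >>> outputs = self.forward(inputs)
        >>> # each 4x4, stride-2 deconv stage doubles the resolution
        >>> outputs[0].shape
        torch.Size([1, 4, 64, 64])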
""" def __init__(self, in_channel, num_deconv_filters, num_deconv_kernels, use_dcn=True, init_cfg=None): super(CTResNetNeck, self).__init__(init_cfg) assert len(num_deconv_filters) == len(num_deconv_kernels) self.fp16_enabled = False self.use_dcn = use_dcn self.in_channel = in_channel self.deconv_layers = self._make_deconv_layer(num_deconv_filters, num_deconv_kernels) def _make_deconv_layer(self, num_deconv_filters, num_deconv_kernels): """use deconv layers to upsample backbone's output.""" layers = [] for i in range(len(num_deconv_filters)): feat_channel = num_deconv_filters[i] conv_module = ConvModule( self.in_channel, feat_channel, 3, padding=1, conv_cfg=dict(type='DCNv2') if self.use_dcn else None, norm_cfg=dict(type='BN')) layers.append(conv_module) upsample_module = ConvModule( feat_channel, feat_channel, num_deconv_kernels[i], stride=2, padding=1, conv_cfg=dict(type='deconv'), norm_cfg=dict(type='BN')) layers.append(upsample_module) self.in_channel = feat_channel return nn.Sequential(*layers) def init_weights(self): for m in self.modules(): if isinstance(m, nn.ConvTranspose2d): # In order to be consistent with the source code, # reset the ConvTranspose2d initialization parameters m.reset_parameters() # Simulated bilinear upsampling kernel w = m.weight.data f = math.ceil(w.size(2) / 2) c = (2 * f - 1 - f % 2) / (2. * f) for i in range(w.size(2)): for j in range(w.size(3)): w[0, 0, i, j] = \ (1 - math.fabs(i / f - c)) * ( 1 - math.fabs(j / f - c)) for c in range(1, w.size(0)): w[c, 0, :, :] = w[0, 0, :, :] elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) # self.use_dcn is False elif not self.use_dcn and isinstance(m, nn.Conv2d): # In order to be consistent with the source code, # reset the Conv2d initialization parameters m.reset_parameters() @auto_fp16() def forward(self, inputs): assert isinstance(inputs, (list, tuple)) outs = self.deconv_layers(inputs[-1]) return outs, ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/dilated_encoder.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import (ConvModule, caffe2_xavier_init, constant_init, is_norm, normal_init) from torch.nn import BatchNorm2d from ..builder import NECKS class Bottleneck(nn.Module): """Bottleneck block for DilatedEncoder used in `YOLOF. `. The Bottleneck contains three ConvLayers and one residual connection. Args: in_channels (int): The number of input channels. mid_channels (int): The number of middle output channels. dilation (int): Dilation rate. norm_cfg (dict): Dictionary to construct and config norm layer. """ def __init__(self, in_channels, mid_channels, dilation, norm_cfg=dict(type='BN', requires_grad=True)): super(Bottleneck, self).__init__() self.conv1 = ConvModule( in_channels, mid_channels, 1, norm_cfg=norm_cfg) self.conv2 = ConvModule( mid_channels, mid_channels, 3, padding=dilation, dilation=dilation, norm_cfg=norm_cfg) self.conv3 = ConvModule( mid_channels, in_channels, 1, norm_cfg=norm_cfg) def forward(self, x): identity = x out = self.conv1(x) out = self.conv2(out) out = self.conv3(out) out = out + identity return out @NECKS.register_module() class DilatedEncoder(nn.Module): """Dilated Encoder for YOLOF `. 
This module contains two types of components: - the original FPN lateral convolution layer and fpn convolution layer, which are 1x1 conv + 3x3 conv - the dilated residual block Args: in_channels (int): The number of input channels. out_channels (int): The number of output channels. block_mid_channels (int): The number of middle block output channels num_residual_blocks (int): The number of residual blocks. block_dilations (list): The list of residual blocks dilation. """ def __init__(self, in_channels, out_channels, block_mid_channels, num_residual_blocks, block_dilations): super(DilatedEncoder, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.block_mid_channels = block_mid_channels self.num_residual_blocks = num_residual_blocks self.block_dilations = block_dilations self._init_layers() def _init_layers(self): self.lateral_conv = nn.Conv2d( self.in_channels, self.out_channels, kernel_size=1) self.lateral_norm = BatchNorm2d(self.out_channels) self.fpn_conv = nn.Conv2d( self.out_channels, self.out_channels, kernel_size=3, padding=1) self.fpn_norm = BatchNorm2d(self.out_channels) encoder_blocks = [] for i in range(self.num_residual_blocks): dilation = self.block_dilations[i] encoder_blocks.append( Bottleneck( self.out_channels, self.block_mid_channels, dilation=dilation)) self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks) def init_weights(self): caffe2_xavier_init(self.lateral_conv) caffe2_xavier_init(self.fpn_conv) for m in [self.lateral_norm, self.fpn_norm]: constant_init(m, 1) for m in self.dilated_encoder_blocks.modules(): if isinstance(m, nn.Conv2d): normal_init(m, mean=0, std=0.01) if is_norm(m): constant_init(m, 1) def forward(self, feature): out = self.lateral_norm(self.lateral_conv(feature[-1])) out = self.fpn_norm(self.fpn_conv(out)) return self.dilated_encoder_blocks(out), ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/dyhead.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import (build_activation_layer, build_norm_layer, constant_init, normal_init) from mmcv.ops.modulated_deform_conv import ModulatedDeformConv2d from mmcv.runner import BaseModule from ..builder import NECKS from ..utils import DyReLU # Reference: # https://github.com/microsoft/DynamicHead # https://github.com/jshilong/SEPC class DyDCNv2(nn.Module): """ModulatedDeformConv2d with normalization layer used in DyHead. This module cannot be configured with `conv_cfg=dict(type='DCNv2')` because DyHead calculates offset and mask from middle-level feature. Args: in_channels (int): Number of input channels. out_channels (int): Number of output channels. stride (int | tuple[int], optional): Stride of the convolution. Default: 1. norm_cfg (dict, optional): Config dict for normalization layer. Default: dict(type='GN', num_groups=16, requires_grad=True). 
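
    Example (an illustrative sketch; the shapes are assumptions, and the
    mmcv modulated deform-conv op must be available in your build):
        >>> import torch
        >>> self = DyDCNv2(16, 16)
        >>> x = torch.rand(1, 16, 8, 8)
        >>> # a 3x3 kernel needs 2*3*3 offset and 3*3 mask channels
        >>> offset = torch.zeros(1, 18, 8, 8)
        >>> mask = torch.rand(1, 9, 8, 8)
        >>> self(x, offset, mask).shape
        torch.Size([1, 16, 8, 8])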
""" def __init__(self, in_channels, out_channels, stride=1, norm_cfg=dict(type='GN', num_groups=16, requires_grad=True)): super().__init__() self.with_norm = norm_cfg is not None bias = not self.with_norm self.conv = ModulatedDeformConv2d( in_channels, out_channels, 3, stride=stride, padding=1, bias=bias) if self.with_norm: self.norm = build_norm_layer(norm_cfg, out_channels)[1] def forward(self, x, offset, mask): """Forward function.""" x = self.conv(x.contiguous(), offset.contiguous(), mask) if self.with_norm: x = self.norm(x) return x class DyHeadBlock(nn.Module): """DyHead Block with three types of attention. HSigmoid arguments in default act_cfg follow official code, not paper. https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py Args: in_channels (int): Number of input channels. out_channels (int): Number of output channels. zero_init_offset (bool, optional): Whether to use zero init for `spatial_conv_offset`. Default: True. act_cfg (dict, optional): Config dict for the last activation layer of scale-aware attention. Default: dict(type='HSigmoid', bias=3.0, divisor=6.0). """ def __init__(self, in_channels, out_channels, zero_init_offset=True, act_cfg=dict(type='HSigmoid', bias=3.0, divisor=6.0)): super().__init__() self.zero_init_offset = zero_init_offset # (offset_x, offset_y, mask) * kernel_size_y * kernel_size_x self.offset_and_mask_dim = 3 * 3 * 3 self.offset_dim = 2 * 3 * 3 self.spatial_conv_high = DyDCNv2(in_channels, out_channels) self.spatial_conv_mid = DyDCNv2(in_channels, out_channels) self.spatial_conv_low = DyDCNv2(in_channels, out_channels, stride=2) self.spatial_conv_offset = nn.Conv2d( in_channels, self.offset_and_mask_dim, 3, padding=1) self.scale_attn_module = nn.Sequential( nn.AdaptiveAvgPool2d(1), nn.Conv2d(out_channels, 1, 1), nn.ReLU(inplace=True), build_activation_layer(act_cfg)) self.task_attn_module = DyReLU(out_channels) self._init_weights() def _init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): normal_init(m, 0, 0.01) if self.zero_init_offset: constant_init(self.spatial_conv_offset, 0) def forward(self, x): """Forward function.""" outs = [] for level in range(len(x)): # calculate offset and mask of DCNv2 from middle-level feature offset_and_mask = self.spatial_conv_offset(x[level]) offset = offset_and_mask[:, :self.offset_dim, :, :] mask = offset_and_mask[:, self.offset_dim:, :, :].sigmoid() mid_feat = self.spatial_conv_mid(x[level], offset, mask) sum_feat = mid_feat * self.scale_attn_module(mid_feat) summed_levels = 1 if level > 0: low_feat = self.spatial_conv_low(x[level - 1], offset, mask) sum_feat = sum_feat + \ low_feat * self.scale_attn_module(low_feat) summed_levels += 1 if level < len(x) - 1: # this upsample order is weird, but faster than natural order # https://github.com/microsoft/DynamicHead/issues/25 high_feat = F.interpolate( self.spatial_conv_high(x[level + 1], offset, mask), size=x[level].shape[-2:], mode='bilinear', align_corners=True) sum_feat = sum_feat + high_feat * \ self.scale_attn_module(high_feat) summed_levels += 1 outs.append(self.task_attn_module(sum_feat / summed_levels)) return outs @NECKS.register_module() class DyHead(BaseModule): """DyHead neck consisting of multiple DyHead Blocks. See `Dynamic Head: Unifying Object Detection Heads with Attentions `_ for details. Args: in_channels (int): Number of input channels. out_channels (int): Number of output channels. num_blocks (int, optional): Number of DyHead Blocks. Default: 6. 
        zero_init_offset (bool, optional): Whether to use zero init for
            `spatial_conv_offset`. Default: True.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_blocks=6,
                 zero_init_offset=True,
                 init_cfg=None):
        assert init_cfg is None, 'To prevent abnormal initialization ' \
            'behavior, init_cfg is not allowed to be set'
        super().__init__(init_cfg=init_cfg)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_blocks = num_blocks
        self.zero_init_offset = zero_init_offset

        dyhead_blocks = []
        for i in range(num_blocks):
            in_channels = self.in_channels if i == 0 else self.out_channels
            dyhead_blocks.append(
                DyHeadBlock(
                    in_channels,
                    self.out_channels,
                    zero_init_offset=zero_init_offset))
        self.dyhead_blocks = nn.Sequential(*dyhead_blocks)

    def forward(self, inputs):
        """Forward function."""
        assert isinstance(inputs, (tuple, list))
        outs = self.dyhead_blocks(inputs)
        return tuple(outs)


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/fpg.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule

from ..builder import NECKS


class Transition(BaseModule):
    """Base class for transition.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
    """

    def __init__(self, in_channels, out_channels, init_cfg=None):
        super().__init__(init_cfg)
        self.in_channels = in_channels
        self.out_channels = out_channels

    def forward(self, x):
        # abstract transition; subclasses implement the actual computation
        pass


class UpInterpolationConv(Transition):
    """A transition used for up-sampling.

    Up-sample the input by interpolation then refines the feature by
    a convolution layer.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        scale_factor (int): Up-sampling factor. Default: 2.
        mode (str): Interpolation mode. Default: 'nearest'.
        align_corners (bool): Whether align corners when interpolation.
            Default: None.
        kernel_size (int): Kernel size for the conv. Default: 3.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 scale_factor=2,
                 mode='nearest',
                 align_corners=None,
                 kernel_size=3,
                 init_cfg=None,
                 **kwargs):
        super().__init__(in_channels, out_channels, init_cfg)
        self.mode = mode
        self.scale_factor = scale_factor
        self.align_corners = align_corners
        self.conv = ConvModule(
            in_channels,
            out_channels,
            kernel_size,
            padding=(kernel_size - 1) // 2,
            **kwargs)

    def forward(self, x):
        x = F.interpolate(
            x,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners)
        x = self.conv(x)
        return x


class LastConv(Transition):
    """A transition used for refining the output of the last stage.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        num_inputs (int): Number of inputs of the FPN features.
        kernel_size (int): Kernel size for the conv. Default: 3.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_inputs,
                 kernel_size=3,
                 init_cfg=None,
                 **kwargs):
        super().__init__(in_channels, out_channels, init_cfg)
        self.num_inputs = num_inputs
        self.conv_out = ConvModule(
            in_channels,
            out_channels,
            kernel_size,
            padding=(kernel_size - 1) // 2,
            **kwargs)

    def forward(self, inputs):
        assert len(inputs) == self.num_inputs
        return self.conv_out(inputs[-1])


@NECKS.register_module()
class FPG(BaseModule):
    """FPG.

    Implementation of `Feature Pyramid Grids (FPG) `_.
    This implementation only gives the basic structure stated in the paper.
    But users can implement different types of transitions to fully explore
    the potential power of the structure of FPG.

    Args:
        in_channels (int): Number of input channels (feature maps of all
            levels should have the same channels).
        out_channels (int): Number of output channels (used at each scale)
        num_outs (int): Number of output scales.
        stack_times (int): The number of times the pyramid architecture will
            be stacked.
        paths (list[str]): Specify the path order of each stack level.
            Each element in the list should be either 'bu' (bottom-up) or
            'td' (top-down).
        inter_channels (int): Number of inter channels.
        same_up_trans (dict): Transition within the same stack step that
            moves one level up the pyramid (a stride-2 conv by default);
            used in bottom-up paths.
        same_down_trans (dict): Transition within the same stack step that
            moves one level down the pyramid; used in top-down paths.
        across_lateral_trans (dict): Across-pathway same-stage connection.
        across_down_trans (dict): Across-pathway top-down connection
            (from the level above).
        across_up_trans (dict): Across-pathway bottom-up connection
            (from the level below).
        across_skip_trans (dict): Across-pathway skip connection.
        output_trans (dict): Transition that transforms the output of the
            last stage.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 0.
        end_level (int): Index of the end input backbone level (exclusive) to
            build the feature pyramid. Default: -1, which means the last
            level.
        add_extra_convs (bool): It decides whether to add conv layers on top
            of the original feature maps. Default to False. If True, its
            actual mode is specified by `extra_convs_on_inputs`.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    transition_types = {
        'conv': ConvModule,
        'interpolation_conv': UpInterpolationConv,
        'last_conv': LastConv,
    }

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 stack_times,
                 paths,
                 inter_channels=None,
                 same_down_trans=None,
                 same_up_trans=dict(
                     type='conv', kernel_size=3, stride=2, padding=1),
                 across_lateral_trans=dict(type='conv', kernel_size=1),
                 across_down_trans=dict(type='conv', kernel_size=3),
                 across_up_trans=None,
                 across_skip_trans=dict(type='identity'),
                 output_trans=dict(type='last_conv', kernel_size=3),
                 start_level=0,
                 end_level=-1,
                 add_extra_convs=False,
                 norm_cfg=None,
                 skip_inds=None,
                 init_cfg=[
                     dict(type='Caffe2Xavier', layer='Conv2d'),
                     dict(
                         type='Constant',
                         layer=[
                             '_BatchNorm', '_InstanceNorm', 'GroupNorm',
                             'LayerNorm'
                         ],
                         val=1.0)
                 ]):
        super(FPG, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        if inter_channels is None:
            self.inter_channels = [out_channels for _ in range(num_outs)]
        elif isinstance(inter_channels, int):
            self.inter_channels = [inter_channels for _ in range(num_outs)]
        else:
            assert isinstance(inter_channels, list)
            assert len(inter_channels) == num_outs
            self.inter_channels = inter_channels
        self.stack_times = stack_times
        self.paths = paths
        assert isinstance(paths, list) and len(paths) == stack_times
        for d in paths:
            assert d in ('bu', 'td')

        self.same_down_trans = same_down_trans
        self.same_up_trans = same_up_trans
        self.across_lateral_trans = across_lateral_trans
        self.across_down_trans = across_down_trans
        self.across_up_trans = across_up_trans
        self.output_trans = output_trans
        self.across_skip_trans = across_skip_trans
        self.with_bias = norm_cfg is None
        # skip inds must be specified if across skip trans is not None
        if self.across_skip_trans is not None:
            assert skip_inds is not None
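        # skip_inds[i] lists, for feature level i, the stack steps at which
        # the level is carried over unchanged instead of being recomputed
        # (see the skip logic in forward())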
self.skip_inds = skip_inds assert len(self.skip_inds[0]) <= self.stack_times if end_level == -1 or end_level == self.num_ins - 1: self.backbone_end_level = self.num_ins assert num_outs >= self.num_ins - start_level else: # if end_level is not the last level, no extra level is allowed self.backbone_end_level = end_level + 1 assert end_level < self.num_ins assert num_outs == end_level - start_level + 1 self.start_level = start_level self.end_level = end_level self.add_extra_convs = add_extra_convs # build lateral 1x1 convs to reduce channels self.lateral_convs = nn.ModuleList() for i in range(self.start_level, self.backbone_end_level): l_conv = nn.Conv2d(self.in_channels[i], self.inter_channels[i - self.start_level], 1) self.lateral_convs.append(l_conv) extra_levels = num_outs - self.backbone_end_level + self.start_level self.extra_downsamples = nn.ModuleList() for i in range(extra_levels): if self.add_extra_convs: fpn_idx = self.backbone_end_level - self.start_level + i extra_conv = nn.Conv2d( self.inter_channels[fpn_idx - 1], self.inter_channels[fpn_idx], 3, stride=2, padding=1) self.extra_downsamples.append(extra_conv) else: self.extra_downsamples.append(nn.MaxPool2d(1, stride=2)) self.fpn_transitions = nn.ModuleList() # stack times for s in range(self.stack_times): stage_trans = nn.ModuleList() # num of feature levels for i in range(self.num_outs): # same, across_lateral, across_down, across_up trans = nn.ModuleDict() if s in self.skip_inds[i]: stage_trans.append(trans) continue # build same-stage down trans (used in bottom-up paths) if i == 0 or self.same_up_trans is None: same_up_trans = None else: same_up_trans = self.build_trans( self.same_up_trans, self.inter_channels[i - 1], self.inter_channels[i]) trans['same_up'] = same_up_trans # build same-stage up trans (used in top-down paths) if i == self.num_outs - 1 or self.same_down_trans is None: same_down_trans = None else: same_down_trans = self.build_trans( self.same_down_trans, self.inter_channels[i + 1], self.inter_channels[i]) trans['same_down'] = same_down_trans # build across lateral trans across_lateral_trans = self.build_trans( self.across_lateral_trans, self.inter_channels[i], self.inter_channels[i]) trans['across_lateral'] = across_lateral_trans # build across down trans if i == self.num_outs - 1 or self.across_down_trans is None: across_down_trans = None else: across_down_trans = self.build_trans( self.across_down_trans, self.inter_channels[i + 1], self.inter_channels[i]) trans['across_down'] = across_down_trans # build across up trans if i == 0 or self.across_up_trans is None: across_up_trans = None else: across_up_trans = self.build_trans( self.across_up_trans, self.inter_channels[i - 1], self.inter_channels[i]) trans['across_up'] = across_up_trans if self.across_skip_trans is None: across_skip_trans = None else: across_skip_trans = self.build_trans( self.across_skip_trans, self.inter_channels[i - 1], self.inter_channels[i]) trans['across_skip'] = across_skip_trans # build across_skip trans stage_trans.append(trans) self.fpn_transitions.append(stage_trans) self.output_transition = nn.ModuleList() # output levels for i in range(self.num_outs): trans = self.build_trans( self.output_trans, self.inter_channels[i], self.out_channels, num_inputs=self.stack_times + 1) self.output_transition.append(trans) self.relu = nn.ReLU(inplace=True) def build_trans(self, cfg, in_channels, out_channels, **extra_args): cfg_ = cfg.copy() trans_type = cfg_.pop('type') trans_cls = self.transition_types[trans_type] return trans_cls(in_channels, 
out_channels, **cfg_, **extra_args) def fuse(self, fuse_dict): out = None for item in fuse_dict.values(): if item is not None: if out is None: out = item else: out = out + item return out def forward(self, inputs): assert len(inputs) == len(self.in_channels) # build all levels from original feature maps feats = [ lateral_conv(inputs[i + self.start_level]) for i, lateral_conv in enumerate(self.lateral_convs) ] for downsample in self.extra_downsamples: feats.append(downsample(feats[-1])) outs = [feats] for i in range(self.stack_times): current_outs = outs[-1] next_outs = [] direction = self.paths[i] for j in range(self.num_outs): if i in self.skip_inds[j]: next_outs.append(outs[-1][j]) continue # feature level if direction == 'td': lvl = self.num_outs - j - 1 else: lvl = j # get transitions if direction == 'td': same_trans = self.fpn_transitions[i][lvl]['same_down'] else: same_trans = self.fpn_transitions[i][lvl]['same_up'] across_lateral_trans = self.fpn_transitions[i][lvl][ 'across_lateral'] across_down_trans = self.fpn_transitions[i][lvl]['across_down'] across_up_trans = self.fpn_transitions[i][lvl]['across_up'] across_skip_trans = self.fpn_transitions[i][lvl]['across_skip'] # init output to_fuse = dict( same=None, lateral=None, across_up=None, across_down=None) # same downsample/upsample if same_trans is not None: to_fuse['same'] = same_trans(next_outs[-1]) # across lateral if across_lateral_trans is not None: to_fuse['lateral'] = across_lateral_trans( current_outs[lvl]) # across downsample if lvl > 0 and across_up_trans is not None: to_fuse['across_up'] = across_up_trans(current_outs[lvl - 1]) # across upsample if (lvl < self.num_outs - 1 and across_down_trans is not None): to_fuse['across_down'] = across_down_trans( current_outs[lvl + 1]) if across_skip_trans is not None: to_fuse['across_skip'] = across_skip_trans(outs[0][lvl]) x = self.fuse(to_fuse) next_outs.append(x) if direction == 'td': outs.append(next_outs[::-1]) else: outs.append(next_outs) # output trans final_outs = [] for i in range(self.num_outs): lvl_out_list = [] for s in range(len(outs)): lvl_out_list.append(outs[s][i]) lvl_out = self.output_transition[i](lvl_out_list) final_outs.append(lvl_out) return final_outs ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/fpn.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, auto_fp16 from ..builder import NECKS @NECKS.register_module() class FPN(BaseModule): r"""Feature Pyramid Network. This is an implementation of paper `Feature Pyramid Networks for Object Detection <https://arxiv.org/abs/1612.03144>`_. Args: in_channels (list[int]): Number of input channels per scale. out_channels (int): Number of output channels (used at each scale). num_outs (int): Number of output scales. start_level (int): Index of the start input backbone level used to build the feature pyramid. Default: 0. end_level (int): Index of the end input backbone level (exclusive) to build the feature pyramid. Default: -1, which means the last level. add_extra_convs (bool | str): If bool, it decides whether to add conv layers on top of the original feature maps. Default to False. If True, it is equivalent to `add_extra_convs='on_input'`. If str, it specifies the source feature map of the extra convs. Only the following options are allowed - 'on_input': Last feat map of neck inputs (i.e. backbone feature).
- 'on_lateral': Last feature map after lateral convs. - 'on_output': The last output feature map after fpn convs. relu_before_extra_convs (bool): Whether to apply relu before the extra conv. Default: False. no_norm_on_lateral (bool): Whether to apply norm on lateral. Default: False. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Config dict for normalization layer. Default: None. act_cfg (dict): Config dict for activation layer in ConvModule. Default: None. upsample_cfg (dict): Config dict for interpolate layer. Default: dict(mode='nearest'). init_cfg (dict or list[dict], optional): Initialization config dict. Example: >>> import torch >>> in_channels = [2, 3, 5, 7] >>> scales = [340, 170, 84, 43] >>> inputs = [torch.rand(1, c, s, s) ... for c, s in zip(in_channels, scales)] >>> self = FPN(in_channels, 11, len(in_channels)).eval() >>> outputs = self.forward(inputs) >>> for i in range(len(outputs)): ... print(f'outputs[{i}].shape = {outputs[i].shape}') outputs[0].shape = torch.Size([1, 11, 340, 340]) outputs[1].shape = torch.Size([1, 11, 170, 170]) outputs[2].shape = torch.Size([1, 11, 84, 84]) outputs[3].shape = torch.Size([1, 11, 43, 43]) """ def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=-1, add_extra_convs=False, relu_before_extra_convs=False, no_norm_on_lateral=False, conv_cfg=None, norm_cfg=None, act_cfg=None, upsample_cfg=dict(mode='nearest'), init_cfg=dict( type='Xavier', layer='Conv2d', distribution='uniform')): super(FPN, self).__init__(init_cfg) assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) self.num_outs = num_outs self.relu_before_extra_convs = relu_before_extra_convs self.no_norm_on_lateral = no_norm_on_lateral self.fp16_enabled = False self.upsample_cfg = upsample_cfg.copy() if end_level == -1 or end_level == self.num_ins - 1: self.backbone_end_level = self.num_ins assert num_outs >= self.num_ins - start_level else: # if end_level is not the last level, no extra level is allowed self.backbone_end_level = end_level + 1 assert end_level < self.num_ins assert num_outs == end_level - start_level + 1 self.start_level = start_level self.end_level = end_level self.add_extra_convs = add_extra_convs assert isinstance(add_extra_convs, (str, bool)) if isinstance(add_extra_convs, str): # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') elif add_extra_convs: # True self.add_extra_convs = 'on_input' self.lateral_convs = nn.ModuleList() self.fpn_convs = nn.ModuleList() for i in range(self.start_level, self.backbone_end_level): l_conv = ConvModule( in_channels[i], out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, act_cfg=act_cfg, inplace=False) fpn_conv = ConvModule( out_channels, out_channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False) self.lateral_convs.append(l_conv) self.fpn_convs.append(fpn_conv) # add extra conv layers (e.g., RetinaNet) extra_levels = num_outs - self.backbone_end_level + self.start_level if self.add_extra_convs and extra_levels >= 1: for i in range(extra_levels): if i == 0 and self.add_extra_convs == 'on_input': in_channels = self.in_channels[self.backbone_end_level - 1] else: in_channels = out_channels extra_fpn_conv = ConvModule( in_channels, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, 
inplace=False) self.fpn_convs.append(extra_fpn_conv) @auto_fp16() def forward(self, inputs): """Forward function.""" assert len(inputs) == len(self.in_channels) # build laterals laterals = [ lateral_conv(inputs[i + self.start_level]) for i, lateral_conv in enumerate(self.lateral_convs) ] # build top-down path used_backbone_levels = len(laterals) for i in range(used_backbone_levels - 1, 0, -1): # In some cases, fixing `scale factor` (e.g. 2) is preferred, but # it cannot co-exist with `size` in `F.interpolate`. if 'scale_factor' in self.upsample_cfg: # fix runtime error of "+=" inplace operation in PyTorch 1.10 laterals[i - 1] = laterals[i - 1] + F.interpolate( laterals[i], **self.upsample_cfg) else: prev_shape = laterals[i - 1].shape[2:] laterals[i - 1] = laterals[i - 1] + F.interpolate( laterals[i], size=prev_shape, **self.upsample_cfg) # build outputs # part 1: from original levels outs = [ self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) ] # part 2: add extra levels if self.num_outs > len(outs): # use max pool to get more levels on top of outputs # (e.g., Faster R-CNN, Mask R-CNN) if not self.add_extra_convs: for i in range(self.num_outs - used_backbone_levels): outs.append(F.max_pool2d(outs[-1], 1, stride=2)) # add conv layers on top of original feature maps (RetinaNet) else: if self.add_extra_convs == 'on_input': extra_source = inputs[self.backbone_end_level - 1] elif self.add_extra_convs == 'on_lateral': extra_source = laterals[-1] elif self.add_extra_convs == 'on_output': extra_source = outs[-1] else: raise NotImplementedError outs.append(self.fpn_convs[used_backbone_levels](extra_source)) for i in range(used_backbone_levels + 1, self.num_outs): if self.relu_before_extra_convs: outs.append(self.fpn_convs[i](F.relu(outs[-1]))) else: outs.append(self.fpn_convs[i](outs[-1])) return tuple(outs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/fpn_carafe.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule, build_upsample_layer, xavier_init from mmcv.ops.carafe import CARAFEPack from mmcv.runner import BaseModule, ModuleList from ..builder import NECKS @NECKS.register_module() class FPN_CARAFE(BaseModule): """FPN_CARAFE is a more flexible implementation of FPN. It allows more choice for upsample methods during the top-down pathway. It can reproduce the performance of ICCV 2019 paper CARAFE: Content-Aware ReAssembly of FEatures Please refer to https://arxiv.org/abs/1905.02188 for more details. Args: in_channels (list[int]): Number of channels for each input feature map. out_channels (int): Output channels of feature pyramids. num_outs (int): Number of output stages. start_level (int): Start level of feature pyramids. (Default: 0) end_level (int): End level of feature pyramids. (Default: -1 indicates the last level). norm_cfg (dict): Dictionary to construct and config norm layer. activate (str): Type of activation function in ConvModule (Default: None indicates w/o activation). order (dict): Order of components in ConvModule. upsample (str): Type of upsample layer. upsample_cfg (dict): Dictionary to construct and config upsample layer. init_cfg (dict or list[dict], optional): Initialization config dict. 
Default: None """ def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=-1, norm_cfg=None, act_cfg=None, order=('conv', 'norm', 'act'), upsample_cfg=dict( type='carafe', up_kernel=5, up_group=1, encoder_kernel=3, encoder_dilation=1), init_cfg=None): assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' super(FPN_CARAFE, self).__init__(init_cfg) assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) self.num_outs = num_outs self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.with_bias = norm_cfg is None self.upsample_cfg = upsample_cfg.copy() self.upsample = self.upsample_cfg.get('type') self.relu = nn.ReLU(inplace=False) self.order = order assert order in [('conv', 'norm', 'act'), ('act', 'conv', 'norm')] assert self.upsample in [ 'nearest', 'bilinear', 'deconv', 'pixel_shuffle', 'carafe', None ] if self.upsample in ['deconv', 'pixel_shuffle']: assert hasattr( self.upsample_cfg, 'upsample_kernel') and self.upsample_cfg.upsample_kernel > 0 self.upsample_kernel = self.upsample_cfg.pop('upsample_kernel') if end_level == -1 or end_level == self.num_ins - 1: self.backbone_end_level = self.num_ins assert num_outs >= self.num_ins - start_level else: # if end_level is not the last level, no extra level is allowed self.backbone_end_level = end_level + 1 assert end_level < self.num_ins assert num_outs == end_level - start_level + 1 self.start_level = start_level self.end_level = end_level self.lateral_convs = ModuleList() self.fpn_convs = ModuleList() self.upsample_modules = ModuleList() for i in range(self.start_level, self.backbone_end_level): l_conv = ConvModule( in_channels[i], out_channels, 1, norm_cfg=norm_cfg, bias=self.with_bias, act_cfg=act_cfg, inplace=False, order=self.order) fpn_conv = ConvModule( out_channels, out_channels, 3, padding=1, norm_cfg=self.norm_cfg, bias=self.with_bias, act_cfg=act_cfg, inplace=False, order=self.order) if i != self.backbone_end_level - 1: upsample_cfg_ = self.upsample_cfg.copy() if self.upsample == 'deconv': upsample_cfg_.update( in_channels=out_channels, out_channels=out_channels, kernel_size=self.upsample_kernel, stride=2, padding=(self.upsample_kernel - 1) // 2, output_padding=(self.upsample_kernel - 1) // 2) elif self.upsample == 'pixel_shuffle': upsample_cfg_.update( in_channels=out_channels, out_channels=out_channels, scale_factor=2, upsample_kernel=self.upsample_kernel) elif self.upsample == 'carafe': upsample_cfg_.update(channels=out_channels, scale_factor=2) else: # suppress warnings align_corners = (None if self.upsample == 'nearest' else False) upsample_cfg_.update( scale_factor=2, mode=self.upsample, align_corners=align_corners) upsample_module = build_upsample_layer(upsample_cfg_) self.upsample_modules.append(upsample_module) self.lateral_convs.append(l_conv) self.fpn_convs.append(fpn_conv) # add extra conv layers (e.g., RetinaNet) extra_out_levels = ( num_outs - self.backbone_end_level + self.start_level) if extra_out_levels >= 1: for i in range(extra_out_levels): in_channels = ( self.in_channels[self.backbone_end_level - 1] if i == 0 else out_channels) extra_l_conv = ConvModule( in_channels, out_channels, 3, stride=2, padding=1, norm_cfg=norm_cfg, bias=self.with_bias, act_cfg=act_cfg, inplace=False, order=self.order) if self.upsample == 'deconv': upsampler_cfg_ = dict( in_channels=out_channels, out_channels=out_channels, kernel_size=self.upsample_kernel, stride=2, 
padding=(self.upsample_kernel - 1) // 2, output_padding=(self.upsample_kernel - 1) // 2) elif self.upsample == 'pixel_shuffle': upsampler_cfg_ = dict( in_channels=out_channels, out_channels=out_channels, scale_factor=2, upsample_kernel=self.upsample_kernel) elif self.upsample == 'carafe': upsampler_cfg_ = dict( channels=out_channels, scale_factor=2, **self.upsample_cfg) else: # suppress warnings align_corners = (None if self.upsample == 'nearest' else False) upsampler_cfg_ = dict( scale_factor=2, mode=self.upsample, align_corners=align_corners) upsampler_cfg_['type'] = self.upsample upsample_module = build_upsample_layer(upsampler_cfg_) extra_fpn_conv = ConvModule( out_channels, out_channels, 3, padding=1, norm_cfg=self.norm_cfg, bias=self.with_bias, act_cfg=act_cfg, inplace=False, order=self.order) self.upsample_modules.append(upsample_module) self.fpn_convs.append(extra_fpn_conv) self.lateral_convs.append(extra_l_conv) # default init_weights for conv(msra) and norm in ConvModule def init_weights(self): """Initialize the weights of module.""" super(FPN_CARAFE, self).init_weights() for m in self.modules(): if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): xavier_init(m, distribution='uniform') for m in self.modules(): if isinstance(m, CARAFEPack): m.init_weights() def slice_as(self, src, dst): """Slice ``src`` as ``dst`` Note: ``src`` should have the same or larger size than ``dst``. Args: src (torch.Tensor): Tensors to be sliced. dst (torch.Tensor): ``src`` will be sliced to have the same size as ``dst``. Returns: torch.Tensor: Sliced tensor. """ assert (src.size(2) >= dst.size(2)) and (src.size(3) >= dst.size(3)) if src.size(2) == dst.size(2) and src.size(3) == dst.size(3): return src else: return src[:, :, :dst.size(2), :dst.size(3)] def tensor_add(self, a, b): """Add tensors ``a`` and ``b`` that might have different sizes.""" if a.size() == b.size(): c = a + b else: c = a + self.slice_as(b, a) return c def forward(self, inputs): """Forward function.""" assert len(inputs) == len(self.in_channels) # build laterals laterals = [] for i, lateral_conv in enumerate(self.lateral_convs): if i <= self.backbone_end_level - self.start_level: input = inputs[min(i + self.start_level, len(inputs) - 1)] else: input = laterals[-1] lateral = lateral_conv(input) laterals.append(lateral) # build top-down path for i in range(len(laterals) - 1, 0, -1): if self.upsample is not None: upsample_feat = self.upsample_modules[i - 1](laterals[i]) else: upsample_feat = laterals[i] laterals[i - 1] = self.tensor_add(laterals[i - 1], upsample_feat) # build outputs num_conv_outs = len(self.fpn_convs) outs = [] for i in range(num_conv_outs): out = self.fpn_convs[i](laterals[i]) outs.append(out) return tuple(outs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/hrfpn.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from torch.utils.checkpoint import checkpoint from ..builder import NECKS @NECKS.register_module() class HRFPN(BaseModule): """HRFPN (High Resolution Feature Pyramids) paper: `High-Resolution Representations for Labeling Pixels and Regions `_. Args: in_channels (list): number of channels for each branch. out_channels (int): output channels of feature pyramids. num_outs (int): number of output stages. pooling_type (str): pooling for generating feature pyramids from {MAX, AVG}. 
conv_cfg (dict): dictionary to construct and config conv layer. norm_cfg (dict): dictionary to construct and config norm layer. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. stride (int): stride of 3x3 convolutional layers init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, in_channels, out_channels, num_outs=5, pooling_type='AVG', conv_cfg=None, norm_cfg=None, with_cp=False, stride=1, init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')): super(HRFPN, self).__init__(init_cfg) assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) self.num_outs = num_outs self.with_cp = with_cp self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.reduction_conv = ConvModule( sum(in_channels), out_channels, kernel_size=1, conv_cfg=self.conv_cfg, act_cfg=None) self.fpn_convs = nn.ModuleList() for i in range(self.num_outs): self.fpn_convs.append( ConvModule( out_channels, out_channels, kernel_size=3, padding=1, stride=stride, conv_cfg=self.conv_cfg, act_cfg=None)) if pooling_type == 'MAX': self.pooling = F.max_pool2d else: self.pooling = F.avg_pool2d def forward(self, inputs): """Forward function.""" assert len(inputs) == self.num_ins outs = [inputs[0]] for i in range(1, self.num_ins): outs.append( F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear')) out = torch.cat(outs, dim=1) if out.requires_grad and self.with_cp: out = checkpoint(self.reduction_conv, out) else: out = self.reduction_conv(out) outs = [out] for i in range(1, self.num_outs): outs.append(self.pooling(out, kernel_size=2**i, stride=2**i)) outputs = [] for i in range(self.num_outs): if outs[i].requires_grad and self.with_cp: tmp_out = checkpoint(self.fpn_convs[i], outs[i]) else: tmp_out = self.fpn_convs[i](outs[i]) outputs.append(tmp_out) return tuple(outputs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/nas_fpn.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops.merge_cells import GlobalPoolingCell, SumCell from mmcv.runner import BaseModule, ModuleList from ..builder import NECKS @NECKS.register_module() class NASFPN(BaseModule): """NAS-FPN. Implementation of `NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection `_ Args: in_channels (List[int]): Number of input channels per scale. out_channels (int): Number of output channels (used at each scale) num_outs (int): Number of output scales. stack_times (int): The number of times the pyramid architecture will be stacked. start_level (int): Index of the start input backbone level used to build the feature pyramid. Default: 0. end_level (int): Index of the end input backbone level (exclusive) to build the feature pyramid. Default: -1, which means the last level. add_extra_convs (bool): It decides whether to add conv layers on top of the original feature maps. Default to False. If True, its actual mode is specified by `extra_convs_on_inputs`. init_cfg (dict or list[dict], optional): Initialization config dict. 
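    Example (an illustrative sketch added for clarity; the channel and
    spatial sizes below are arbitrary assumptions, not values from the
    paper):
        >>> import torch
        >>> in_channels = [8, 16, 32]
        >>> inputs = [torch.rand(1, c, 32 // 2**i, 32 // 2**i)
        ...           for i, c in enumerate(in_channels)]
        >>> self = NASFPN(in_channels, 8, num_outs=5, stack_times=1).eval()
        >>> outputs = self.forward(inputs)
        >>> for out in outputs:
        ...     print(tuple(out.shape))
        (1, 8, 32, 32)
        (1, 8, 16, 16)
        (1, 8, 8, 8)
        (1, 8, 4, 4)
        (1, 8, 2, 2)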
""" def __init__(self, in_channels, out_channels, num_outs, stack_times, start_level=0, end_level=-1, add_extra_convs=False, norm_cfg=None, init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')): super(NASFPN, self).__init__(init_cfg) assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) # num of input feature levels self.num_outs = num_outs # num of output feature levels self.stack_times = stack_times self.norm_cfg = norm_cfg if end_level == -1 or end_level == self.num_ins - 1: self.backbone_end_level = self.num_ins assert num_outs >= self.num_ins - start_level else: # if end_level is not the last level, no extra level is allowed self.backbone_end_level = end_level + 1 assert end_level < self.num_ins assert num_outs == end_level - start_level + 1 self.start_level = start_level self.end_level = end_level self.add_extra_convs = add_extra_convs # add lateral connections self.lateral_convs = nn.ModuleList() for i in range(self.start_level, self.backbone_end_level): l_conv = ConvModule( in_channels[i], out_channels, 1, norm_cfg=norm_cfg, act_cfg=None) self.lateral_convs.append(l_conv) # add extra downsample layers (stride-2 pooling or conv) extra_levels = num_outs - self.backbone_end_level + self.start_level self.extra_downsamples = nn.ModuleList() for i in range(extra_levels): extra_conv = ConvModule( out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None) self.extra_downsamples.append( nn.Sequential(extra_conv, nn.MaxPool2d(2, 2))) # add NAS FPN connections self.fpn_stages = ModuleList() for _ in range(self.stack_times): stage = nn.ModuleDict() # gp(p6, p4) -> p4_1 stage['gp_64_4'] = GlobalPoolingCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # sum(p4_1, p4) -> p4_2 stage['sum_44_4'] = SumCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # sum(p4_2, p3) -> p3_out stage['sum_43_3'] = SumCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # sum(p3_out, p4_2) -> p4_out stage['sum_34_4'] = SumCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # sum(p5, gp(p4_out, p3_out)) -> p5_out stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False) stage['sum_55_5'] = SumCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # sum(p7, gp(p5_out, p4_2)) -> p7_out stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False) stage['sum_77_7'] = SumCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # gp(p7_out, p5_out) -> p6_out stage['gp_75_6'] = GlobalPoolingCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) self.fpn_stages.append(stage) def forward(self, inputs): """Forward function.""" # build P3-P5 feats = [ lateral_conv(inputs[i + self.start_level]) for i, lateral_conv in enumerate(self.lateral_convs) ] # build P6-P7 on top of P5 for downsample in self.extra_downsamples: feats.append(downsample(feats[-1])) p3, p4, p5, p6, p7 = feats for stage in self.fpn_stages: # gp(p6, p4) -> p4_1 p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[-2:]) # sum(p4_1, p4) -> p4_2 p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[-2:]) # sum(p4_2, p3) -> p3_out p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[-2:]) # sum(p3_out, p4_2) -> p4_out p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[-2:]) # sum(p5, gp(p4_out, p3_out)) -> p5_out p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[-2:]) p5 = stage['sum_55_5'](p5, 
p5_tmp, out_size=p5.shape[-2:]) # sum(p7, gp(p5_out, p4_2)) -> p7_out p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[-2:]) p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[-2:]) # gp(p7_out, p5_out) -> p6_out p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[-2:]) return p3, p4, p5, p6, p7 ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/nasfcos_fpn.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, caffe2_xavier_init from mmcv.ops.merge_cells import ConcatCell from mmcv.runner import BaseModule from ..builder import NECKS @NECKS.register_module() class NASFCOS_FPN(BaseModule): """FPN structure in NAS-FCOS. Implementation of paper `NAS-FCOS: Fast Neural Architecture Search for Object Detection <https://arxiv.org/abs/1906.04423>`_. Args: in_channels (List[int]): Number of input channels per scale. out_channels (int): Number of output channels (used at each scale). num_outs (int): Number of output scales. start_level (int): Index of the start input backbone level used to build the feature pyramid. Default: 0. end_level (int): Index of the end input backbone level (exclusive) to build the feature pyramid. Default: -1, which means the last level. add_extra_convs (bool): It decides whether to add conv layers on top of the original feature maps. Default to False. If True, its actual mode is specified by `extra_convs_on_inputs`. conv_cfg (dict): dictionary to construct and config conv layer. norm_cfg (dict): dictionary to construct and config norm layer. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, in_channels, out_channels, num_outs, start_level=1, end_level=-1, add_extra_convs=False, conv_cfg=None, norm_cfg=None, init_cfg=None): assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' super(NASFCOS_FPN, self).__init__(init_cfg) assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) self.num_outs = num_outs self.norm_cfg = norm_cfg self.conv_cfg = conv_cfg if end_level == -1 or end_level == self.num_ins - 1: self.backbone_end_level = self.num_ins assert num_outs >= self.num_ins - start_level else: # if end_level is not the last level, no extra level is allowed self.backbone_end_level = end_level + 1 assert end_level < self.num_ins assert num_outs == end_level - start_level + 1 self.start_level = start_level self.end_level = end_level self.add_extra_convs = add_extra_convs self.adapt_convs = nn.ModuleList() for i in range(self.start_level, self.backbone_end_level): adapt_conv = ConvModule( in_channels[i], out_channels, 1, stride=1, padding=0, bias=False, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU', inplace=False)) self.adapt_convs.append(adapt_conv) # C2 is omitted according to the paper extra_levels = num_outs - self.backbone_end_level + self.start_level def build_concat_cell(with_input1_conv, with_input2_conv): cell_conv_cfg = dict( kernel_size=1, padding=0, bias=False, groups=out_channels) return ConcatCell( in_channels=out_channels, out_channels=out_channels, with_out_conv=True, out_conv_cfg=cell_conv_cfg, out_norm_cfg=dict(type='BN'), out_conv_order=('norm', 'act', 'conv'), with_input1_conv=with_input1_conv, with_input2_conv=with_input2_conv, input_conv_cfg=conv_cfg, input_norm_cfg=norm_cfg, upsample_mode='nearest') # Denote c3=f0, c4=f1, c5=f2 for convenience self.fpn = nn.ModuleDict() self.fpn['c22_1'] = build_concat_cell(True, True) self.fpn['c22_2'] = build_concat_cell(True, True) self.fpn['c32'] = build_concat_cell(True, False) self.fpn['c02'] = build_concat_cell(True, False) self.fpn['c42'] = build_concat_cell(True, True) self.fpn['c36'] = build_concat_cell(True, True) self.fpn['c61'] = build_concat_cell(True, True) # f9 self.extra_downsamples = nn.ModuleList() for i in range(extra_levels): extra_act_cfg = None if i == 0 \ else dict(type='ReLU', inplace=False) self.extra_downsamples.append( ConvModule( out_channels, out_channels, 3, stride=2, padding=1, act_cfg=extra_act_cfg, order=('act', 'norm', 'conv'))) def forward(self, inputs): """Forward function.""" feats = [ adapt_conv(inputs[i + self.start_level]) for i, adapt_conv in enumerate(self.adapt_convs) ] for (i, module_name) in enumerate(self.fpn): idx_1, idx_2 = int(module_name[1]), int(module_name[2]) res = self.fpn[module_name](feats[idx_1], feats[idx_2]) feats.append(res) ret = [] for (idx, input_idx) in zip([9, 8, 7], [1, 2, 3]): # add P3, P4, P5 feats1, feats2 = feats[idx], feats[5] feats2_resize = F.interpolate( feats2, size=feats1.size()[2:], mode='bilinear', align_corners=False) feats_sum = feats1 + feats2_resize ret.append( F.interpolate( feats_sum, size=inputs[input_idx].size()[2:], mode='bilinear', align_corners=False)) for submodule in self.extra_downsamples: ret.append(submodule(ret[-1])) return tuple(ret) def init_weights(self): """Initialize the weights of module.""" super(NASFCOS_FPN, self).init_weights() for module in self.fpn.values(): if hasattr(module, 'out_conv'): caffe2_xavier_init(module.out_conv.conv) for modules in [ self.adapt_convs.modules(), self.extra_downsamples.modules() ]: for module in modules: if isinstance(module, nn.Conv2d): caffe2_xavier_init(module) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/pafpn.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import auto_fp16 from ..builder import NECKS from .fpn import FPN @NECKS.register_module() class PAFPN(FPN): """Path Aggregation Network for Instance Segmentation. This is an implementation of the `PAFPN in Path Aggregation Network <https://arxiv.org/abs/1803.01534>`_. Args: in_channels (List[int]): Number of input channels per scale. out_channels (int): Number of output channels (used at each scale). num_outs (int): Number of output scales. start_level (int): Index of the start input backbone level used to build the feature pyramid. Default: 0. end_level (int): Index of the end input backbone level (exclusive) to build the feature pyramid. Default: -1, which means the last level. add_extra_convs (bool | str): If bool, it decides whether to add conv layers on top of the original feature maps. Default to False. If True, it is equivalent to `add_extra_convs='on_input'`. If str, it specifies the source feature map of the extra convs. Only the following options are allowed - 'on_input': Last feat map of neck inputs (i.e. backbone feature). - 'on_lateral': Last feature map after lateral convs. - 'on_output': The last output feature map after fpn convs. relu_before_extra_convs (bool): Whether to apply relu before the extra conv. Default: False. no_norm_on_lateral (bool): Whether to apply norm on lateral. Default: False. conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Config dict for normalization layer. Default: None. act_cfg (str): Config dict for activation layer in ConvModule. Default: None. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=-1, add_extra_convs=False, relu_before_extra_convs=False, no_norm_on_lateral=False, conv_cfg=None, norm_cfg=None, act_cfg=None, init_cfg=dict( type='Xavier', layer='Conv2d', distribution='uniform')): super(PAFPN, self).__init__( in_channels, out_channels, num_outs, start_level, end_level, add_extra_convs, relu_before_extra_convs, no_norm_on_lateral, conv_cfg, norm_cfg, act_cfg, init_cfg=init_cfg) # add extra bottom up pathway self.downsample_convs = nn.ModuleList() self.pafpn_convs = nn.ModuleList() for i in range(self.start_level + 1, self.backbone_end_level): d_conv = ConvModule( out_channels, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False) pafpn_conv = ConvModule( out_channels, out_channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False) self.downsample_convs.append(d_conv) self.pafpn_convs.append(pafpn_conv) @auto_fp16() def forward(self, inputs): """Forward function.""" assert len(inputs) == len(self.in_channels) # build laterals laterals = [ lateral_conv(inputs[i + self.start_level]) for i, lateral_conv in enumerate(self.lateral_convs) ] # build top-down path used_backbone_levels = len(laterals) for i in range(used_backbone_levels - 1, 0, -1): prev_shape = laterals[i - 1].shape[2:] # fix runtime error of "+=" inplace operation in PyTorch 1.10 laterals[i - 1] = laterals[i - 1] + F.interpolate( laterals[i], size=prev_shape, mode='nearest') # build outputs # part 1: from original levels inter_outs = [ self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) ] # part 2: add bottom-up path for i in range(0, used_backbone_levels - 1): inter_outs[i + 1] += self.downsample_convs[i](inter_outs[i]) outs = [] outs.append(inter_outs[0]) outs.extend([ self.pafpn_convs[i - 1](inter_outs[i]) for i in range(1, used_backbone_levels) ]) # part 3: add extra levels if self.num_outs > len(outs): # use max pool to get more levels on top of outputs # (e.g., Faster R-CNN, Mask R-CNN) if not self.add_extra_convs: for i in range(self.num_outs - used_backbone_levels): outs.append(F.max_pool2d(outs[-1], 1, stride=2)) # add conv layers on top of original feature maps (RetinaNet) else: if self.add_extra_convs == 'on_input': orig = inputs[self.backbone_end_level - 1] outs.append(self.fpn_convs[used_backbone_levels](orig)) elif self.add_extra_convs == 'on_lateral': outs.append(self.fpn_convs[used_backbone_levels]( laterals[-1])) elif self.add_extra_convs == 'on_output': outs.append(self.fpn_convs[used_backbone_levels](outs[-1])) else: raise NotImplementedError for i in range(used_backbone_levels + 1, self.num_outs): if self.relu_before_extra_convs: outs.append(self.fpn_convs[i](F.relu(outs[-1]))) else: outs.append(self.fpn_convs[i](outs[-1])) return tuple(outs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/rfp.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
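# A usage sketch for the RFP neck defined below (the values are illustrative
# assumptions in the spirit of the DetectoRS configs, not copied from a
# config file in this repository):
#
#     neck=dict(
#         type='RFP',
#         in_channels=[256, 512, 1024, 2048],
#         out_channels=256,
#         num_outs=5,
#         rfp_steps=2,
#         aspp_out_channels=64,
#         aspp_dilations=(1, 3, 6, 1),
#         rfp_backbone=dict(
#             type='DetectoRS_ResNet',
#             depth=50,
#             rfp_inplanes=256))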
import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import constant_init, xavier_init from mmcv.runner import BaseModule, ModuleList from ..builder import NECKS, build_backbone from .fpn import FPN class ASPP(BaseModule): """ASPP (Atrous Spatial Pyramid Pooling) This is an implementation of the ASPP module used in DetectoRS (https://arxiv.org/pdf/2006.02334.pdf) Args: in_channels (int): Number of input channels. out_channels (int): Number of channels produced by this module. dilations (tuple[int]): Dilations of the four branches. Default: (1, 3, 6, 1) init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, in_channels, out_channels, dilations=(1, 3, 6, 1), init_cfg=dict(type='Kaiming', layer='Conv2d')): super().__init__(init_cfg) assert dilations[-1] == 1 self.aspp = nn.ModuleList() for dilation in dilations: kernel_size = 3 if dilation > 1 else 1 padding = dilation if dilation > 1 else 0 conv = nn.Conv2d( in_channels, out_channels, kernel_size=kernel_size, stride=1, dilation=dilation, padding=padding, bias=True) self.aspp.append(conv) self.gap = nn.AdaptiveAvgPool2d(1) def forward(self, x): avg_x = self.gap(x) out = [] for aspp_idx in range(len(self.aspp)): inp = avg_x if (aspp_idx == len(self.aspp) - 1) else x out.append(F.relu_(self.aspp[aspp_idx](inp))) out[-1] = out[-1].expand_as(out[-2]) out = torch.cat(out, dim=1) return out @NECKS.register_module() class RFP(FPN): """RFP (Recursive Feature Pyramid) This is an implementation of RFP in `DetectoRS <https://arxiv.org/pdf/2006.02334.pdf>`_. Different from standard FPN, the input of RFP should be multi-level features along with the original input image of the backbone. Args: rfp_steps (int): Number of unrolled steps of RFP. rfp_backbone (dict): Configuration of the backbone for RFP. aspp_out_channels (int): Number of output channels of ASPP module. aspp_dilations (tuple[int]): Dilation rates of four branches. Default: (1, 3, 6, 1) init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, rfp_steps, rfp_backbone, aspp_out_channels, aspp_dilations=(1, 3, 6, 1), init_cfg=None, **kwargs): assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' super().__init__(init_cfg=init_cfg, **kwargs) self.rfp_steps = rfp_steps # Be careful! Pretrained weights cannot be loaded when using # nn.ModuleList self.rfp_modules = ModuleList() for rfp_idx in range(1, rfp_steps): rfp_module = build_backbone(rfp_backbone) self.rfp_modules.append(rfp_module) self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels, aspp_dilations) self.rfp_weight = nn.Conv2d( self.out_channels, 1, kernel_size=1, stride=1, padding=0, bias=True) def init_weights(self): # Avoid using super().init_weights(), which may alter the default # initialization of the modules in self.rfp_modules that have missing # keys in the pretrained checkpoint.
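        # Only the lateral/fpn convs of the FPN part are re-initialized
        # below; the recursive backbones in self.rfp_modules keep the
        # initialization done by their own init_weights().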
for convs in [self.lateral_convs, self.fpn_convs]: for m in convs.modules(): if isinstance(m, nn.Conv2d): xavier_init(m, distribution='uniform') for rfp_idx in range(self.rfp_steps - 1): self.rfp_modules[rfp_idx].init_weights() constant_init(self.rfp_weight, 0) def forward(self, inputs): inputs = list(inputs) assert len(inputs) == len(self.in_channels) + 1 # +1 for input image img = inputs.pop(0) # FPN forward x = super().forward(tuple(inputs)) for rfp_idx in range(self.rfp_steps - 1): rfp_feats = [x[0]] + list( self.rfp_aspp(x[i]) for i in range(1, len(x))) x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats) # FPN forward x_idx = super().forward(x_idx) x_new = [] for ft_idx in range(len(x_idx)): add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx])) x_new.append(add_weight * x_idx[ft_idx] + (1 - add_weight) * x[ft_idx]) x = x_new return x ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/ssd_neck.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule from mmcv.runner import BaseModule from ..builder import NECKS @NECKS.register_module() class SSDNeck(BaseModule): """Extra layers of SSD backbone to generate multi-scale feature maps. Args: in_channels (Sequence[int]): Number of input channels per scale. out_channels (Sequence[int]): Number of output channels per scale. level_strides (Sequence[int]): Stride of 3x3 conv per level. level_paddings (Sequence[int]): Padding size of 3x3 conv per level. l2_norm_scale (float|None): L2 normalization layer init scale. If None, L2 normalization is not applied to the first input feature. last_kernel_size (int): Kernel size of the last conv layer. Default: 3. use_depthwise (bool): Whether to use DepthwiseSeparableConv. Default: False. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Default: None. act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU'). init_cfg (dict or list[dict], optional): Initialization config dict.
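    Example (an illustrative sketch added for clarity; the channel sizes,
    strides and paddings below are arbitrary assumptions, not the SSD300
    defaults):
        >>> import torch
        >>> self = SSDNeck(
        ...     in_channels=(8, 16),
        ...     out_channels=(8, 16, 32, 64),
        ...     level_strides=(2, 2),
        ...     level_paddings=(1, 1),
        ...     l2_norm_scale=None)
        >>> feats = [torch.rand(1, 8, 16, 16), torch.rand(1, 16, 8, 8)]
        >>> outputs = self.forward(feats)
        >>> for out in outputs:
        ...     print(tuple(out.shape))
        (1, 8, 16, 16)
        (1, 16, 8, 8)
        (1, 32, 4, 4)
        (1, 64, 2, 2)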
""" def __init__(self, in_channels, out_channels, level_strides, level_paddings, l2_norm_scale=20., last_kernel_size=3, use_depthwise=False, conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'), init_cfg=[ dict( type='Xavier', distribution='uniform', layer='Conv2d'), dict(type='Constant', val=1, layer='BatchNorm2d'), ]): super(SSDNeck, self).__init__(init_cfg) assert len(out_channels) > len(in_channels) assert len(out_channels) - len(in_channels) == len(level_strides) assert len(level_strides) == len(level_paddings) assert in_channels == out_channels[:len(in_channels)] if l2_norm_scale: self.l2_norm = L2Norm(in_channels[0], l2_norm_scale) self.init_cfg += [ dict( type='Constant', val=self.l2_norm.scale, override=dict(name='l2_norm')) ] self.extra_layers = nn.ModuleList() extra_layer_channels = out_channels[len(in_channels):] second_conv = DepthwiseSeparableConvModule if \ use_depthwise else ConvModule for i, (out_channel, stride, padding) in enumerate( zip(extra_layer_channels, level_strides, level_paddings)): kernel_size = last_kernel_size \ if i == len(extra_layer_channels) - 1 else 3 per_lvl_convs = nn.Sequential( ConvModule( out_channels[len(in_channels) - 1 + i], out_channel // 2, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg), second_conv( out_channel // 2, out_channel, kernel_size, stride=stride, padding=padding, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)) self.extra_layers.append(per_lvl_convs) def forward(self, inputs): """Forward function.""" outs = [feat for feat in inputs] if hasattr(self, 'l2_norm'): outs[0] = self.l2_norm(outs[0]) feat = outs[-1] for layer in self.extra_layers: feat = layer(feat) outs.append(feat) return tuple(outs) class L2Norm(nn.Module): def __init__(self, n_dims, scale=20., eps=1e-10): """L2 normalization layer. Args: n_dims (int): Number of dimensions to be normalized scale (float, optional): Defaults to 20.. eps (float, optional): Used to avoid division by zero. Defaults to 1e-10. """ super(L2Norm, self).__init__() self.n_dims = n_dims self.weight = nn.Parameter(torch.Tensor(self.n_dims)) self.eps = eps self.scale = scale def forward(self, x): """Forward function.""" # normalization layer convert to FP32 in FP16 training x_float = x.float() norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps return (self.weight[None, :, None, None].float().expand_as(x_float) * x_float / norm).type_as(x) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/yolo_neck.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. # Copyright (c) 2019 Western Digital Corporation or its affiliates. import torch import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from ..builder import NECKS class DetectionBlock(BaseModule): """Detection block in YOLO neck. Let out_channels = n, the DetectionBlock contains: Six ConvLayers, 1 Conv2D Layer and 1 YoloLayer. The first 6 ConvLayers are formed the following way: 1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n. The Conv2D layer is 1x1x255. Some block will have branch after the fifth ConvLayer. The input channel is arbitrary (in_channels) Args: in_channels (int): The number of input channels. out_channels (int): The number of output channels. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True) act_cfg (dict): Config dict for activation layer. 
Default: dict(type='LeakyReLU', negative_slope=0.1). init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), init_cfg=None): super(DetectionBlock, self).__init__(init_cfg) double_out_channels = out_channels * 2 # shortcut cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg) self.conv2 = ConvModule( out_channels, double_out_channels, 3, padding=1, **cfg) self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg) self.conv4 = ConvModule( out_channels, double_out_channels, 3, padding=1, **cfg) self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg) def forward(self, x): tmp = self.conv1(x) tmp = self.conv2(tmp) tmp = self.conv3(tmp) tmp = self.conv4(tmp) out = self.conv5(tmp) return out @NECKS.register_module() class YOLOV3Neck(BaseModule): """The neck of YOLOV3. It can be treated as a simplified version of FPN. It will take the result from Darknet backbone and do some upsampling and concatenation. It will finally output the detection result. Note: The input feats should be from top to bottom. i.e., from high-lvl to low-lvl But YOLOV3Neck will process them in reversed order. i.e., from bottom (high-lvl) to top (low-lvl) Args: num_scales (int): The number of scales / stages. in_channels (List[int]): The number of input channels per scale. out_channels (List[int]): The number of output channels per scale. conv_cfg (dict, optional): Config dict for convolution layer. Default: None. norm_cfg (dict, optional): Dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True) act_cfg (dict, optional): Config dict for activation layer. Default: dict(type='LeakyReLU', negative_slope=0.1). init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, num_scales, in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), init_cfg=None): super(YOLOV3Neck, self).__init__(init_cfg) assert (num_scales == len(in_channels) == len(out_channels)) self.num_scales = num_scales self.in_channels = in_channels self.out_channels = out_channels # shortcut cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) # To support arbitrary scales, the code looks awful, but it works. # Better solution is welcomed. 
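        # conv{i} (1x1) squeezes the previous detection output before it is
        # upsampled, and detect{i+1} consumes its concatenation with the
        # next backbone feature; forward() fetches both back via getattr().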
self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg) for i in range(1, self.num_scales): in_c, out_c = self.in_channels[i], self.out_channels[i] inter_c = out_channels[i - 1] self.add_module(f'conv{i}', ConvModule(inter_c, out_c, 1, **cfg)) # in_c + out_c : High-lvl feats will be cat with low-lvl feats self.add_module(f'detect{i+1}', DetectionBlock(in_c + out_c, out_c, **cfg)) def forward(self, feats): assert len(feats) == self.num_scales # processed from bottom (high-lvl) to top (low-lvl) outs = [] out = self.detect1(feats[-1]) outs.append(out) for i, x in enumerate(reversed(feats[:-1])): conv = getattr(self, f'conv{i+1}') tmp = conv(out) # Cat with low-lvl feats tmp = F.interpolate(tmp, scale_factor=2) tmp = torch.cat((tmp, x), 1) detect = getattr(self, f'detect{i+2}') out = detect(tmp) outs.append(out) return tuple(outs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/yolox_pafpn.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import math import torch import torch.nn as nn from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule from mmcv.runner import BaseModule from ..builder import NECKS from ..utils import CSPLayer @NECKS.register_module() class YOLOXPAFPN(BaseModule): """Path Aggregation Network used in YOLOX. Args: in_channels (List[int]): Number of input channels per scale. out_channels (int): Number of output channels (used at each scale). num_csp_blocks (int): Number of bottlenecks in CSPLayer. Default: 3 use_depthwise (bool): Whether to use depthwise separable convolutions in blocks. Default: False upsample_cfg (dict): Config dict for interpolate layer. Default: `dict(scale_factor=2, mode='nearest')` conv_cfg (dict, optional): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN') act_cfg (dict): Config dict for activation layer. Default: dict(type='Swish') init_cfg (dict or list[dict], optional): Initialization config dict. Default: None.
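    Example (an illustrative sketch added for clarity; the channel and
    spatial sizes below are arbitrary assumptions):
        >>> import torch
        >>> in_channels = [16, 32, 64]
        >>> inputs = [torch.rand(1, c, 64 // 2**i, 64 // 2**i)
        ...           for i, c in enumerate(in_channels)]
        >>> self = YOLOXPAFPN(in_channels, 8).eval()
        >>> outputs = self.forward(inputs)
        >>> for out in outputs:
        ...     print(tuple(out.shape))
        (1, 8, 64, 64)
        (1, 8, 32, 32)
        (1, 8, 16, 16)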
""" def __init__(self, in_channels, out_channels, num_csp_blocks=3, use_depthwise=False, upsample_cfg=dict(scale_factor=2, mode='nearest'), conv_cfg=None, norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish'), init_cfg=dict( type='Kaiming', layer='Conv2d', a=math.sqrt(5), distribution='uniform', mode='fan_in', nonlinearity='leaky_relu')): super(YOLOXPAFPN, self).__init__(init_cfg) self.in_channels = in_channels self.out_channels = out_channels conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule # build top-down blocks self.upsample = nn.Upsample(**upsample_cfg) self.reduce_layers = nn.ModuleList() self.top_down_blocks = nn.ModuleList() for idx in range(len(in_channels) - 1, 0, -1): self.reduce_layers.append( ConvModule( in_channels[idx], in_channels[idx - 1], 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)) self.top_down_blocks.append( CSPLayer( in_channels[idx - 1] * 2, in_channels[idx - 1], num_blocks=num_csp_blocks, add_identity=False, use_depthwise=use_depthwise, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)) # build bottom-up blocks self.downsamples = nn.ModuleList() self.bottom_up_blocks = nn.ModuleList() for idx in range(len(in_channels) - 1): self.downsamples.append( conv( in_channels[idx], in_channels[idx], 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)) self.bottom_up_blocks.append( CSPLayer( in_channels[idx] * 2, in_channels[idx + 1], num_blocks=num_csp_blocks, add_identity=False, use_depthwise=use_depthwise, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)) self.out_convs = nn.ModuleList() for i in range(len(in_channels)): self.out_convs.append( ConvModule( in_channels[i], out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)) def forward(self, inputs): """ Args: inputs (tuple[Tensor]): input features. Returns: tuple[Tensor]: YOLOXPAFPN features. """ assert len(inputs) == len(self.in_channels) # top-down path inner_outs = [inputs[-1]] for idx in range(len(self.in_channels) - 1, 0, -1): feat_heigh = inner_outs[0] feat_low = inputs[idx - 1] feat_heigh = self.reduce_layers[len(self.in_channels) - 1 - idx]( feat_heigh) inner_outs[0] = feat_heigh upsample_feat = self.upsample(feat_heigh) inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx]( torch.cat([upsample_feat, feat_low], 1)) inner_outs.insert(0, inner_out) # bottom-up path outs = [inner_outs[0]] for idx in range(len(self.in_channels) - 1): feat_low = outs[-1] feat_height = inner_outs[idx + 1] downsample_feat = self.downsamples[idx](feat_low) out = self.bottom_up_blocks[idx]( torch.cat([downsample_feat, feat_height], 1)) outs.append(out) # out convs for idx, conv in enumerate(self.out_convs): outs[idx] = conv(outs[idx]) return tuple(outs) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/plugins/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .dropblock import DropBlock from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder __all__ = [ 'DropBlock', 'PixelDecoder', 'TransformerEncoderPixelDecoder', 'MSDeformAttnPixelDecoder' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/plugins/dropblock.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import PLUGIN_LAYERS eps = 1e-6 @PLUGIN_LAYERS.register_module() class DropBlock(nn.Module): """Randomly drop some regions of feature maps. Please refer to the method proposed in `DropBlock <https://arxiv.org/abs/1810.12890>`_ for details. Args: drop_prob (float): The probability of dropping each block. block_size (int): The size of dropped blocks. warmup_iters (int): The drop probability will linearly increase from `0` to `drop_prob` during the first `warmup_iters` iterations. Default: 2000. """ def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs): super(DropBlock, self).__init__() assert block_size % 2 == 1 assert 0 < drop_prob <= 1 assert warmup_iters >= 0 self.drop_prob = drop_prob self.block_size = block_size self.warmup_iters = warmup_iters self.iter_cnt = 0 def forward(self, x): """ Args: x (Tensor): Input feature map on which some areas will be randomly dropped. Returns: Tensor: The tensor after DropBlock layer. """ if not self.training: return x self.iter_cnt += 1 N, C, H, W = list(x.shape) gamma = self._compute_gamma((H, W)) mask_shape = (N, C, H - self.block_size + 1, W - self.block_size + 1) mask = torch.bernoulli(torch.full(mask_shape, gamma, device=x.device)) mask = F.pad(mask, [self.block_size // 2] * 4, value=0) mask = F.max_pool2d( input=mask, stride=(1, 1), kernel_size=(self.block_size, self.block_size), padding=self.block_size // 2) mask = 1 - mask x = x * mask * mask.numel() / (eps + mask.sum()) return x def _compute_gamma(self, feat_size): """Compute the value of gamma according to the paper. gamma is the parameter of bernoulli distribution, which controls the number of features to drop. gamma = (drop_prob * fm_area) / (drop_area * keep_area) Args: feat_size (tuple[int, int]): The height and width of feature map. Returns: float: The value of gamma. """ gamma = (self.drop_prob * feat_size[0] * feat_size[1]) gamma /= ((feat_size[0] - self.block_size + 1) * (feat_size[1] - self.block_size + 1)) gamma /= (self.block_size**2) factor = (1.0 if self.iter_cnt > self.warmup_iters else self.iter_cnt / self.warmup_iters) return gamma * factor def extra_repr(self): return (f'drop_prob={self.drop_prob}, block_size={self.block_size}, ' f'warmup_iters={self.warmup_iters}') ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/plugins/msdeformattn_pixel_decoder.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import (PLUGIN_LAYERS, Conv2d, ConvModule, caffe2_xavier_init, normal_init, xavier_init) from mmcv.cnn.bricks.transformer import (build_positional_encoding, build_transformer_layer_sequence) from mmcv.runner import BaseModule, ModuleList from mmdet.core.anchor import MlvlPointGenerator from mmdet.models.utils.transformer import MultiScaleDeformableAttention @PLUGIN_LAYERS.register_module() class MSDeformAttnPixelDecoder(BaseModule): """Pixel decoder with multi-scale deformable attention. Args: in_channels (list[int] | tuple[int]): Number of channels in the input feature maps. strides (list[int] | tuple[int]): Output strides of feature from backbone. feat_channels (int): Number of channels for feature. out_channels (int): Number of channels for output. num_outs (int): Number of output scales. norm_cfg (:obj:`mmcv.ConfigDict` | dict): Config for normalization. Defaults to dict(type='GN', num_groups=32).
act_cfg (:obj:`mmcv.ConfigDict` | dict): Config for activation. Defaults to dict(type='ReLU'). encoder (:obj:`mmcv.ConfigDict` | dict): Config for transformer encoder. Defaults to `DetrTransformerEncoder`. positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for transformer encoder position encoding. Defaults to dict(type='SinePositionalEncoding', num_feats=128, normalize=True). init_cfg (:obj:`mmcv.ConfigDict` | dict): Initialization config dict. """ def __init__(self, in_channels=[256, 512, 1024, 2048], strides=[4, 8, 16, 32], feat_channels=256, out_channels=256, num_outs=3, norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='ReLU'), encoder=dict( type='DetrTransformerEncoder', num_layers=6, transformerlayers=dict( type='BaseTransformerLayer', attn_cfgs=dict( type='MultiScaleDeformableAttention', embed_dims=256, num_heads=8, num_levels=3, num_points=4, im2col_step=64, dropout=0.0, batch_first=False, norm_cfg=None, init_cfg=None), feedforward_channels=1024, ffn_dropout=0.0, operation_order=('self_attn', 'norm', 'ffn', 'norm')), init_cfg=None), positional_encoding=dict( type='SinePositionalEncoding', num_feats=128, normalize=True), init_cfg=None): super().__init__(init_cfg=init_cfg) self.strides = strides self.num_input_levels = len(in_channels) self.num_encoder_levels = \ encoder.transformerlayers.attn_cfgs.num_levels assert self.num_encoder_levels >= 1, \ 'num_levels in attn_cfgs must be at least one' input_conv_list = [] # from top to down (low to high resolution) for i in range(self.num_input_levels - 1, self.num_input_levels - self.num_encoder_levels - 1, -1): input_conv = ConvModule( in_channels[i], feat_channels, kernel_size=1, norm_cfg=norm_cfg, act_cfg=None, bias=True) input_conv_list.append(input_conv) self.input_convs = ModuleList(input_conv_list) self.encoder = build_transformer_layer_sequence(encoder) self.postional_encoding = build_positional_encoding( positional_encoding) # high resolution to low resolution self.level_encoding = nn.Embedding(self.num_encoder_levels, feat_channels) # fpn-like structure self.lateral_convs = ModuleList() self.output_convs = ModuleList() self.use_bias = norm_cfg is None # from top to down (low to high resolution) # fpn for the rest features that didn't pass in encoder for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1, -1): lateral_conv = ConvModule( in_channels[i], feat_channels, kernel_size=1, bias=self.use_bias, norm_cfg=norm_cfg, act_cfg=None) output_conv = ConvModule( feat_channels, feat_channels, kernel_size=3, stride=1, padding=1, bias=self.use_bias, norm_cfg=norm_cfg, act_cfg=act_cfg) self.lateral_convs.append(lateral_conv) self.output_convs.append(output_conv) self.mask_feature = Conv2d( feat_channels, out_channels, kernel_size=1, stride=1, padding=0) self.num_outs = num_outs self.point_generator = MlvlPointGenerator(strides) def init_weights(self): """Initialize weights.""" for i in range(0, self.num_encoder_levels): xavier_init( self.input_convs[i].conv, gain=1, bias=0, distribution='uniform') for i in range(0, self.num_input_levels - self.num_encoder_levels): caffe2_xavier_init(self.lateral_convs[i].conv, bias=0) caffe2_xavier_init(self.output_convs[i].conv, bias=0) caffe2_xavier_init(self.mask_feature, bias=0) normal_init(self.level_encoding, mean=0, std=1) for p in self.encoder.parameters(): if p.dim() > 1: nn.init.xavier_normal_(p) # init_weights defined in MultiScaleDeformableAttention for layer in self.encoder.layers: for attn in layer.attentions: if isinstance(attn, 
MultiScaleDeformableAttention): attn.init_weights() def forward(self, feats): """ Args: feats (list[Tensor]): Feature maps of each level. Each has shape of (batch_size, c, h, w). Returns: tuple: A tuple containing the following: - mask_feature (Tensor): shape (batch_size, c, h, w). - multi_scale_features (list[Tensor]): Multi scale \ features, each in shape (batch_size, c, h, w). """ # generate padding mask for each level, for each image batch_size = feats[0].shape[0] encoder_input_list = [] padding_mask_list = [] level_positional_encoding_list = [] spatial_shapes = [] reference_points_list = [] for i in range(self.num_encoder_levels): level_idx = self.num_input_levels - i - 1 feat = feats[level_idx] feat_projected = self.input_convs[i](feat) h, w = feat.shape[-2:] # no padding padding_mask_resized = feat.new_zeros( (batch_size, ) + feat.shape[-2:], dtype=torch.bool) pos_embed = self.postional_encoding(padding_mask_resized) level_embed = self.level_encoding.weight[i] level_pos_embed = level_embed.view(1, -1, 1, 1) + pos_embed # (h_i * w_i, 2) reference_points = self.point_generator.single_level_grid_priors( feat.shape[-2:], level_idx, device=feat.device) # normalize factor = feat.new_tensor([[w, h]]) * self.strides[level_idx] reference_points = reference_points / factor # shape (batch_size, c, h_i, w_i) -> (h_i * w_i, batch_size, c) feat_projected = feat_projected.flatten(2).permute(2, 0, 1) level_pos_embed = level_pos_embed.flatten(2).permute(2, 0, 1) padding_mask_resized = padding_mask_resized.flatten(1) encoder_input_list.append(feat_projected) padding_mask_list.append(padding_mask_resized) level_positional_encoding_list.append(level_pos_embed) spatial_shapes.append(feat.shape[-2:]) reference_points_list.append(reference_points) # shape (batch_size, total_num_query), # total_num_query=sum([., h_i * w_i,.]) padding_masks = torch.cat(padding_mask_list, dim=1) # shape (total_num_query, batch_size, c) encoder_inputs = torch.cat(encoder_input_list, dim=0) level_positional_encodings = torch.cat( level_positional_encoding_list, dim=0) device = encoder_inputs.device # shape (num_encoder_levels, 2), from low # resolution to high resolution spatial_shapes = torch.as_tensor( spatial_shapes, dtype=torch.long, device=device) # shape (0, h_0*w_0, h_0*w_0+h_1*w_1, ...) 
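        # A tiny worked example of the `level_start_index` computed on the
        # next line (the numbers are assumptions for illustration). With
        # three encoder levels, low to high resolution, and
        # spatial_shapes = [[16, 16], [32, 32], [64, 64]]:
        #   per-level query counts:  prod(1)   -> [256, 1024, 4096]
        #   running total:           cumsum(0) -> [256, 1280, 5376]
        #   drop last, prepend zero            -> [0, 256, 1280]
        # so the queries of level i occupy the slice
        # [level_start_index[i], level_start_index[i] + h_i * w_i).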
level_start_index = torch.cat((spatial_shapes.new_zeros( (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) reference_points = torch.cat(reference_points_list, dim=0) reference_points = reference_points[None, :, None].repeat( batch_size, 1, self.num_encoder_levels, 1) valid_radios = reference_points.new_ones( (batch_size, self.num_encoder_levels, 2)) # shape (num_total_query, batch_size, c) memory = self.encoder( query=encoder_inputs, key=None, value=None, query_pos=level_positional_encodings, key_pos=None, attn_masks=None, key_padding_mask=None, query_key_padding_mask=padding_masks, spatial_shapes=spatial_shapes, reference_points=reference_points, level_start_index=level_start_index, valid_radios=valid_radios) # (num_total_query, batch_size, c) -> (batch_size, c, num_total_query) memory = memory.permute(1, 2, 0) # from low resolution to high resolution num_query_per_level = [e[0] * e[1] for e in spatial_shapes] outs = torch.split(memory, num_query_per_level, dim=-1) outs = [ x.reshape(batch_size, -1, spatial_shapes[i][0], spatial_shapes[i][1]) for i, x in enumerate(outs) ] for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1, -1): x = feats[i] cur_feat = self.lateral_convs[i](x) y = cur_feat + F.interpolate( outs[-1], size=cur_feat.shape[-2:], mode='bilinear', align_corners=False) y = self.output_convs[i](y) outs.append(y) multi_scale_features = outs[:self.num_outs] mask_feature = self.mask_feature(outs[-1]) return mask_feature, multi_scale_features ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/plugins/pixel_decoder.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import PLUGIN_LAYERS, Conv2d, ConvModule, caffe2_xavier_init from mmcv.cnn.bricks.transformer import (build_positional_encoding, build_transformer_layer_sequence) from mmcv.runner import BaseModule, ModuleList @PLUGIN_LAYERS.register_module() class PixelDecoder(BaseModule): """Pixel decoder with a structure like fpn. Args: in_channels (list[int] | tuple[int]): Number of channels in the input feature maps. feat_channels (int): Number channels for feature. out_channels (int): Number channels for output. norm_cfg (:obj:`mmcv.ConfigDict` | dict): Config for normalization. Defaults to dict(type='GN', num_groups=32). act_cfg (:obj:`mmcv.ConfigDict` | dict): Config for activation. Defaults to dict(type='ReLU'). encoder (:obj:`mmcv.ConfigDict` | dict): Config for transorformer encoder.Defaults to None. positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for transformer encoder position encoding. Defaults to dict(type='SinePositionalEncoding', num_feats=128, normalize=True). init_cfg (:obj:`mmcv.ConfigDict` | dict): Initialization config dict. 
Default: None """ def __init__(self, in_channels, feat_channels, out_channels, norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='ReLU'), init_cfg=None): super().__init__(init_cfg=init_cfg) self.in_channels = in_channels self.num_inputs = len(in_channels) self.lateral_convs = ModuleList() self.output_convs = ModuleList() self.use_bias = norm_cfg is None for i in range(0, self.num_inputs - 1): lateral_conv = ConvModule( in_channels[i], feat_channels, kernel_size=1, bias=self.use_bias, norm_cfg=norm_cfg, act_cfg=None) output_conv = ConvModule( feat_channels, feat_channels, kernel_size=3, stride=1, padding=1, bias=self.use_bias, norm_cfg=norm_cfg, act_cfg=act_cfg) self.lateral_convs.append(lateral_conv) self.output_convs.append(output_conv) self.last_feat_conv = ConvModule( in_channels[-1], feat_channels, kernel_size=3, padding=1, stride=1, bias=self.use_bias, norm_cfg=norm_cfg, act_cfg=act_cfg) self.mask_feature = Conv2d( feat_channels, out_channels, kernel_size=3, stride=1, padding=1) def init_weights(self): """Initialize weights.""" for i in range(0, self.num_inputs - 2): caffe2_xavier_init(self.lateral_convs[i].conv, bias=0) caffe2_xavier_init(self.output_convs[i].conv, bias=0) caffe2_xavier_init(self.mask_feature, bias=0) caffe2_xavier_init(self.last_feat_conv, bias=0) def forward(self, feats, img_metas): """ Args: feats (list[Tensor]): Feature maps of each level. Each has shape of (batch_size, c, h, w). img_metas (list[dict]): List of image information. Pass in for creating more accurate padding mask. Not used here. Returns: tuple: a tuple containing the following: - mask_feature (Tensor): Shape (batch_size, c, h, w). - memory (Tensor): Output of last stage of backbone.\ Shape (batch_size, c, h, w). """ y = self.last_feat_conv(feats[-1]) for i in range(self.num_inputs - 2, -1, -1): x = feats[i] cur_feat = self.lateral_convs[i](x) y = cur_feat + \ F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest') y = self.output_convs[i](y) mask_feature = self.mask_feature(y) memory = feats[-1] return mask_feature, memory @PLUGIN_LAYERS.register_module() class TransformerEncoderPixelDecoder(PixelDecoder): """Pixel decoder with transormer encoder inside. Args: in_channels (list[int] | tuple[int]): Number of channels in the input feature maps. feat_channels (int): Number channels for feature. out_channels (int): Number channels for output. norm_cfg (:obj:`mmcv.ConfigDict` | dict): Config for normalization. Defaults to dict(type='GN', num_groups=32). act_cfg (:obj:`mmcv.ConfigDict` | dict): Config for activation. Defaults to dict(type='ReLU'). encoder (:obj:`mmcv.ConfigDict` | dict): Config for transorformer encoder.Defaults to None. positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for transformer encoder position encoding. Defaults to dict(type='SinePositionalEncoding', num_feats=128, normalize=True). init_cfg (:obj:`mmcv.ConfigDict` | dict): Initialization config dict. 
Default: None
    """

    def __init__(self,
                 in_channels,
                 feat_channels,
                 out_channels,
                 norm_cfg=dict(type='GN', num_groups=32),
                 act_cfg=dict(type='ReLU'),
                 encoder=None,
                 positional_encoding=dict(
                     type='SinePositionalEncoding',
                     num_feats=128,
                     normalize=True),
                 init_cfg=None):
        super(TransformerEncoderPixelDecoder, self).__init__(
            in_channels,
            feat_channels,
            out_channels,
            norm_cfg,
            act_cfg,
            init_cfg=init_cfg)
        self.last_feat_conv = None

        self.encoder = build_transformer_layer_sequence(encoder)
        self.encoder_embed_dims = self.encoder.embed_dims
        assert self.encoder_embed_dims == feat_channels, 'embed_dims({}) of ' \
            'transformer encoder must equal to feat_channels({})'.format(
                self.encoder_embed_dims, feat_channels)
        self.positional_encoding = build_positional_encoding(
            positional_encoding)
        self.encoder_in_proj = Conv2d(
            in_channels[-1], feat_channels, kernel_size=1)
        self.encoder_out_proj = ConvModule(
            feat_channels,
            feat_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=self.use_bias,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)

    def init_weights(self):
        """Initialize weights."""
        for i in range(0, self.num_inputs - 2):
            caffe2_xavier_init(self.lateral_convs[i].conv, bias=0)
            caffe2_xavier_init(self.output_convs[i].conv, bias=0)

        caffe2_xavier_init(self.mask_feature, bias=0)
        caffe2_xavier_init(self.encoder_in_proj, bias=0)
        caffe2_xavier_init(self.encoder_out_proj.conv, bias=0)

        for p in self.encoder.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, feats, img_metas):
        """
        Args:
            feats (list[Tensor]): Feature maps of each level. Each has
                shape of (batch_size, c, h, w).
            img_metas (list[dict]): List of image information. Pass in
                for creating more accurate padding mask.

        Returns:
            tuple: a tuple containing the following:

                - mask_feature (Tensor): shape (batch_size, c, h, w).
                - memory (Tensor): shape (batch_size, c, h, w).
        """
        feat_last = feats[-1]
        bs, c, h, w = feat_last.shape
        input_img_h, input_img_w = img_metas[0]['batch_input_shape']
        padding_mask = feat_last.new_ones((bs, input_img_h, input_img_w),
                                          dtype=torch.float32)
        for i in range(bs):
            img_h, img_w, _ = img_metas[i]['img_shape']
            padding_mask[i, :img_h, :img_w] = 0
        padding_mask = F.interpolate(
            padding_mask.unsqueeze(1),
            size=feat_last.shape[-2:],
            mode='nearest').to(torch.bool).squeeze(1)

        pos_embed = self.positional_encoding(padding_mask)
        feat_last = self.encoder_in_proj(feat_last)
        # (batch_size, c, h, w) -> (num_queries, batch_size, c)
        feat_last = feat_last.flatten(2).permute(2, 0, 1)
        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
        # (batch_size, h, w) -> (batch_size, h*w)
        padding_mask = padding_mask.flatten(1)
        memory = self.encoder(
            query=feat_last,
            key=None,
            value=None,
            query_pos=pos_embed,
            query_key_padding_mask=padding_mask)
        # (num_queries, batch_size, c) -> (batch_size, c, h, w)
        memory = memory.permute(1, 2, 0).view(bs, self.encoder_embed_dims, h,
                                              w)
        y = self.encoder_out_proj(memory)
        for i in range(self.num_inputs - 2, -1, -1):
            x = feats[i]
            cur_feat = self.lateral_convs[i](x)
            y = cur_feat + \
                F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest')
            y = self.output_convs[i](y)

        mask_feature = self.mask_feature(y)
        return mask_feature, memory


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/__init__.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
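# All of the heads re-exported below are registered in mmdet's HEADS
# registry, so configs normally refer to them by name instead of importing
# them directly. A minimal sketch of that pattern (illustrative only; the
# cfg values are assumptions, not a config shipped with this repository):
#
#     from mmdet.models.builder import build_head
#     roi_head = build_head(
#         dict(
#             type='StandardRoIHead',
#             bbox_roi_extractor=dict(
#                 type='SingleRoIExtractor',
#                 roi_layer=dict(
#                     type='RoIAlign', output_size=7, sampling_ratio=0),
#                 out_channels=256,
#                 featmap_strides=[4, 8, 16, 32]),
#             bbox_head=dict(
#                 type='Shared2FCBBoxHead', in_channels=256, num_classes=80)))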
from .base_roi_head import BaseRoIHead
from .bbox_heads import (BBoxHead, ConvFCBBoxHead, DIIHead,
                         DoubleConvFCBBoxHead, SABLHead, SCNetBBoxHead,
                         Shared2FCBBoxHead, Shared4Conv1FCBBoxHead)
from .cascade_roi_head import CascadeRoIHead
from .double_roi_head import DoubleHeadRoIHead
from .dynamic_roi_head import DynamicRoIHead
from .grid_roi_head import GridRoIHead
from .htc_roi_head import HybridTaskCascadeRoIHead
from .mask_heads import (CoarseMaskHead, FCNMaskHead, FeatureRelayHead,
                         FusedSemanticHead, GlobalContextHead, GridHead,
                         HTCMaskHead, MaskIoUHead, MaskPointHead,
                         SCNetMaskHead, SCNetSemanticHead)
from .mask_scoring_roi_head import MaskScoringRoIHead
from .pisa_roi_head import PISARoIHead
from .point_rend_roi_head import PointRendRoIHead
from .roi_extractors import (BaseRoIExtractor, GenericRoIExtractor,
                             SingleRoIExtractor)
from .scnet_roi_head import SCNetRoIHead
from .shared_heads import ResLayer
from .sparse_roi_head import SparseRoIHead
from .standard_roi_head import StandardRoIHead
from .trident_roi_head import TridentRoIHead

__all__ = [
    'BaseRoIHead', 'CascadeRoIHead', 'DoubleHeadRoIHead',
    'MaskScoringRoIHead', 'HybridTaskCascadeRoIHead', 'GridRoIHead',
    'ResLayer', 'BBoxHead', 'ConvFCBBoxHead', 'DIIHead', 'SABLHead',
    'Shared2FCBBoxHead', 'StandardRoIHead', 'Shared4Conv1FCBBoxHead',
    'DoubleConvFCBBoxHead', 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead',
    'GridHead', 'MaskIoUHead', 'BaseRoIExtractor', 'GenericRoIExtractor',
    'SingleRoIExtractor', 'PISARoIHead', 'PointRendRoIHead', 'MaskPointHead',
    'CoarseMaskHead', 'DynamicRoIHead', 'SparseRoIHead', 'TridentRoIHead',
    'SCNetRoIHead', 'SCNetMaskHead', 'SCNetSemanticHead', 'SCNetBBoxHead',
    'FeatureRelayHead', 'GlobalContextHead'
]


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/base_roi_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
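# BaseRoIHead below only fixes the interface; concrete heads supply the
# abstract pieces. A skeletal, hypothetical subclass just to show what a new
# head must implement (the names here are made up for illustration):
#
#     from mmdet.models.builder import HEADS
#
#     @HEADS.register_module()
#     class MyRoIHead(BaseRoIHead):
#
#         def init_bbox_head(self, bbox_roi_extractor, bbox_head):
#             ...  # build self.bbox_roi_extractor and self.bbox_head
#
#         def init_mask_head(self, mask_roi_extractor, mask_head):
#             ...  # build self.mask_roi_extractor and self.mask_head
#
#         def init_assigner_sampler(self):
#             ...  # build assigner/sampler from self.train_cfg
#
#         def forward_train(self, x, img_metas, proposal_list, gt_bboxes,
#                           gt_labels, **kwargs):
#             ...  # return a dict of losses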
from abc import ABCMeta, abstractmethod from mmcv.runner import BaseModule from ..builder import build_shared_head class BaseRoIHead(BaseModule, metaclass=ABCMeta): """Base class for RoIHeads.""" def __init__(self, bbox_roi_extractor=None, bbox_head=None, mask_roi_extractor=None, mask_head=None, shared_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(BaseRoIHead, self).__init__(init_cfg) self.train_cfg = train_cfg self.test_cfg = test_cfg if shared_head is not None: shared_head.pretrained = pretrained self.shared_head = build_shared_head(shared_head) if bbox_head is not None: self.init_bbox_head(bbox_roi_extractor, bbox_head) if mask_head is not None: self.init_mask_head(mask_roi_extractor, mask_head) self.init_assigner_sampler() @property def with_bbox(self): """bool: whether the RoI head contains a `bbox_head`""" return hasattr(self, 'bbox_head') and self.bbox_head is not None @property def with_mask(self): """bool: whether the RoI head contains a `mask_head`""" return hasattr(self, 'mask_head') and self.mask_head is not None @property def with_shared_head(self): """bool: whether the RoI head contains a `shared_head`""" return hasattr(self, 'shared_head') and self.shared_head is not None @abstractmethod def init_bbox_head(self): """Initialize ``bbox_head``""" pass @abstractmethod def init_mask_head(self): """Initialize ``mask_head``""" pass @abstractmethod def init_assigner_sampler(self): """Initialize assigner and sampler.""" pass @abstractmethod def forward_train(self, x, img_meta, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, **kwargs): """Forward function during training.""" async def async_simple_test(self, x, proposal_list, img_metas, proposals=None, rescale=False, **kwargs): """Asynchronized test function.""" raise NotImplementedError def simple_test(self, x, proposal_list, img_meta, proposals=None, rescale=False, **kwargs): """Test without augmentation.""" def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs): """Test with augmentations. If rescale is False, then returned bboxes and masks will fit the scale of imgs[0]. """ ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/bbox_heads/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .bbox_head import BBoxHead from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead, Shared4Conv1FCBBoxHead) from .dii_head import DIIHead from .double_bbox_head import DoubleConvFCBBoxHead from .sabl_head import SABLHead from .scnet_bbox_head import SCNetBBoxHead __all__ = [ 'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead', 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead', 'SCNetBBoxHead' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/bbox_heads/bbox_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
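# Shape walkthrough for the BBoxHead defined below (an illustrative sketch;
# with_avg_pool=True is chosen so raw (N, C, 7, 7) RoI features can be fed
# in directly, everything else is left at its default):
#   input RoI features: (num_rois, 256, 7, 7)
#   global avg pool:    (num_rois, 256)
#   fc_cls output:      (num_rois, 81)   # 80 classes + 1 background
#   fc_reg output:      (num_rois, 320)  # 4 deltas per class
#
#     import torch
#     from mmdet.models.roi_heads.bbox_heads import BBoxHead
#     head = BBoxHead(with_avg_pool=True)
#     cls_score, bbox_pred = head(torch.randn(8, 256, 7, 7))
#     assert cls_score.shape == (8, 81) and bbox_pred.shape == (8, 320)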
import torch import torch.nn as nn import torch.nn.functional as F from mmcv.runner import BaseModule, auto_fp16, force_fp32 from torch.nn.modules.utils import _pair from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms from mmdet.models.builder import HEADS, build_loss from mmdet.models.losses import accuracy from mmdet.models.utils import build_linear_layer @HEADS.register_module() class BBoxHead(BaseModule): """Simplest RoI head, with only two fc layers for classification and regression respectively.""" def __init__(self, with_avg_pool=False, with_cls=True, with_reg=True, roi_feat_size=7, in_channels=256, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', clip_border=True, target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, reg_decoded_bbox=False, reg_predictor_cfg=dict(type='Linear'), cls_predictor_cfg=dict(type='Linear'), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict( type='SmoothL1Loss', beta=1.0, loss_weight=1.0), init_cfg=None): super(BBoxHead, self).__init__(init_cfg) assert with_cls or with_reg self.with_avg_pool = with_avg_pool self.with_cls = with_cls self.with_reg = with_reg self.roi_feat_size = _pair(roi_feat_size) self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1] self.in_channels = in_channels self.num_classes = num_classes self.reg_class_agnostic = reg_class_agnostic self.reg_decoded_bbox = reg_decoded_bbox self.reg_predictor_cfg = reg_predictor_cfg self.cls_predictor_cfg = cls_predictor_cfg self.fp16_enabled = False self.bbox_coder = build_bbox_coder(bbox_coder) self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) in_channels = self.in_channels if self.with_avg_pool: self.avg_pool = nn.AvgPool2d(self.roi_feat_size) else: in_channels *= self.roi_feat_area if self.with_cls: # need to add background class if self.custom_cls_channels: cls_channels = self.loss_cls.get_cls_channels(self.num_classes) else: cls_channels = num_classes + 1 self.fc_cls = build_linear_layer( self.cls_predictor_cfg, in_features=in_channels, out_features=cls_channels) if self.with_reg: out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes self.fc_reg = build_linear_layer( self.reg_predictor_cfg, in_features=in_channels, out_features=out_dim_reg) self.debug_imgs = None if init_cfg is None: self.init_cfg = [] if self.with_cls: self.init_cfg += [ dict( type='Normal', std=0.01, override=dict(name='fc_cls')) ] if self.with_reg: self.init_cfg += [ dict( type='Normal', std=0.001, override=dict(name='fc_reg')) ] @property def custom_cls_channels(self): return getattr(self.loss_cls, 'custom_cls_channels', False) @property def custom_activation(self): return getattr(self.loss_cls, 'custom_activation', False) @property def custom_accuracy(self): return getattr(self.loss_cls, 'custom_accuracy', False) @auto_fp16() def forward(self, x): if self.with_avg_pool: if x.numel() > 0: x = self.avg_pool(x) x = x.view(x.size(0), -1) else: # avg_pool does not support empty tensor, # so use torch.mean instead it x = torch.mean(x, dim=(-1, -2)) cls_score = self.fc_cls(x) if self.with_cls else None bbox_pred = self.fc_reg(x) if self.with_reg else None return cls_score, bbox_pred def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes, pos_gt_labels, cfg): """Calculate the ground truth for proposals in the single image according to the sampling results. 
Args: pos_bboxes (Tensor): Contains all the positive boxes, has shape (num_pos, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. neg_bboxes (Tensor): Contains all the negative boxes, has shape (num_neg, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. pos_gt_bboxes (Tensor): Contains gt_boxes for all positive samples, has shape (num_pos, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. pos_gt_labels (Tensor): Contains gt_labels for all positive samples, has shape (num_pos, ). cfg (obj:`ConfigDict`): `train_cfg` of R-CNN. Returns: Tuple[Tensor]: Ground truth for proposals in a single image. Containing the following Tensors: - labels(Tensor): Gt_labels for all proposals, has shape (num_proposals,). - label_weights(Tensor): Labels_weights for all proposals, has shape (num_proposals,). - bbox_targets(Tensor):Regression target for all proposals, has shape (num_proposals, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - bbox_weights(Tensor):Regression weights for all proposals, has shape (num_proposals, 4). """ num_pos = pos_bboxes.size(0) num_neg = neg_bboxes.size(0) num_samples = num_pos + num_neg # original implementation uses new_zeros since BG are set to be 0 # now use empty & fill because BG cat_id = num_classes, # FG cat_id = [0, num_classes-1] labels = pos_bboxes.new_full((num_samples, ), self.num_classes, dtype=torch.long) label_weights = pos_bboxes.new_zeros(num_samples) bbox_targets = pos_bboxes.new_zeros(num_samples, 4) bbox_weights = pos_bboxes.new_zeros(num_samples, 4) if num_pos > 0: labels[:num_pos] = pos_gt_labels pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight label_weights[:num_pos] = pos_weight if not self.reg_decoded_bbox: pos_bbox_targets = self.bbox_coder.encode( pos_bboxes, pos_gt_bboxes) else: # When the regression loss (e.g. `IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, both # the predicted boxes and regression targets should be with # absolute coordinate format. pos_bbox_targets = pos_gt_bboxes bbox_targets[:num_pos, :] = pos_bbox_targets bbox_weights[:num_pos, :] = 1 if num_neg > 0: label_weights[-num_neg:] = 1.0 return labels, label_weights, bbox_targets, bbox_weights def get_targets(self, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg, concat=True): """Calculate the ground truth for all samples in a batch according to the sampling_results. Almost the same as the implementation in bbox_head, we passed additional parameters pos_inds_list and neg_inds_list to `_get_target_single` function. Args: sampling_results (List[obj:SamplingResults]): Assign results of all images in a batch after sampling. gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch, each tensor has shape (num_gt, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. gt_labels (list[Tensor]): Gt_labels of all images in a batch, each tensor has shape (num_gt,). rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. concat (bool): Whether to concatenate the results of all the images in a single batch. Returns: Tuple[Tensor]: Ground truth for proposals in a single image. Containing the following list of Tensors: - labels (list[Tensor],Tensor): Gt_labels for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). 
- label_weights (list[Tensor]): Labels_weights for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - bbox_targets (list[Tensor],Tensor): Regression target for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - bbox_weights (list[tensor],Tensor): Regression weights for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4). """ pos_bboxes_list = [res.pos_bboxes for res in sampling_results] neg_bboxes_list = [res.neg_bboxes for res in sampling_results] pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results] pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results] labels, label_weights, bbox_targets, bbox_weights = multi_apply( self._get_target_single, pos_bboxes_list, neg_bboxes_list, pos_gt_bboxes_list, pos_gt_labels_list, cfg=rcnn_train_cfg) if concat: labels = torch.cat(labels, 0) label_weights = torch.cat(label_weights, 0) bbox_targets = torch.cat(bbox_targets, 0) bbox_weights = torch.cat(bbox_weights, 0) return labels, label_weights, bbox_targets, bbox_weights @force_fp32(apply_to=('cls_score', 'bbox_pred')) def loss(self, cls_score, bbox_pred, rois, labels, label_weights, bbox_targets, bbox_weights, reduction_override=None): losses = dict() if cls_score is not None: avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) if cls_score.numel() > 0: loss_cls_ = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor, reduction_override=reduction_override) if isinstance(loss_cls_, dict): losses.update(loss_cls_) else: losses['loss_cls'] = loss_cls_ if self.custom_activation: acc_ = self.loss_cls.get_accuracy(cls_score, labels) losses.update(acc_) else: losses['acc'] = accuracy(cls_score, labels) if bbox_pred is not None: bg_class_ind = self.num_classes # 0~self.num_classes-1 are FG, self.num_classes is BG pos_inds = (labels >= 0) & (labels < bg_class_ind) # do not perform bounding box regression for BG anymore. if pos_inds.any(): if self.reg_decoded_bbox: # When the regression loss (e.g. `IouLoss`, # `GIouLoss`, `DIouLoss`) is applied directly on # the decoded bounding boxes, it decodes the # already encoded coordinates to absolute format. bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred) if self.reg_class_agnostic: pos_bbox_pred = bbox_pred.view( bbox_pred.size(0), 4)[pos_inds.type(torch.bool)] else: pos_bbox_pred = bbox_pred.view( bbox_pred.size(0), -1, 4)[pos_inds.type(torch.bool), labels[pos_inds.type(torch.bool)]] losses['loss_bbox'] = self.loss_bbox( pos_bbox_pred, bbox_targets[pos_inds.type(torch.bool)], bbox_weights[pos_inds.type(torch.bool)], avg_factor=bbox_targets.size(0), reduction_override=reduction_override) else: losses['loss_bbox'] = bbox_pred[pos_inds].sum() return losses @force_fp32(apply_to=('cls_score', 'bbox_pred')) def get_bboxes(self, rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=False, cfg=None): """Transform network output for a batch into bbox predictions. Args: rois (Tensor): Boxes to be transformed. Has shape (num_boxes, 5). last dimension 5 arrange as (batch_index, x1, y1, x2, y2). cls_score (Tensor): Box scores, has shape (num_boxes, num_classes + 1). bbox_pred (Tensor, optional): Box energies / deltas. 
has shape (num_boxes, num_classes * 4). img_shape (Sequence[int], optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). scale_factor (ndarray): Scale factor of the image arrange as (w_scale, h_scale, w_scale, h_scale). rescale (bool): If True, return boxes in original image space. Default: False. cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None Returns: tuple[Tensor, Tensor]: First tensor is `det_bboxes`, has the shape (num_boxes, 5) and last dimension 5 represent (tl_x, tl_y, br_x, br_y, score). Second tensor is the labels with shape (num_boxes, ). """ # some loss (Seesaw loss..) may have custom activation if self.custom_cls_channels: scores = self.loss_cls.get_activation(cls_score) else: scores = F.softmax( cls_score, dim=-1) if cls_score is not None else None # bbox_pred would be None in some detector when with_reg is False, # e.g. Grid R-CNN. if bbox_pred is not None: bboxes = self.bbox_coder.decode( rois[..., 1:], bbox_pred, max_shape=img_shape) else: bboxes = rois[:, 1:].clone() if img_shape is not None: bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1]) bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0]) if rescale and bboxes.size(0) > 0: scale_factor = bboxes.new_tensor(scale_factor) bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view( bboxes.size()[0], -1) if cfg is None: return bboxes, scores else: det_bboxes, det_labels = multiclass_nms(bboxes, scores, cfg.score_thr, cfg.nms, cfg.max_per_img) return det_bboxes, det_labels @force_fp32(apply_to=('bbox_preds', )) def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas): """Refine bboxes during training. Args: rois (Tensor): Shape (n*bs, 5), where n is image number per GPU, and bs is the sampled RoIs per image. The first column is the image id and the next 4 columns are x1, y1, x2, y2. labels (Tensor): Shape (n*bs, ). bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class). pos_is_gts (list[Tensor]): Flags indicating if each positive bbox is a gt bbox. img_metas (list[dict]): Meta info of each image. Returns: list[Tensor]: Refined bboxes of each image in a mini-batch. Example: >>> # xdoctest: +REQUIRES(module:kwarray) >>> import kwarray >>> import numpy as np >>> from mmdet.core.bbox.demodata import random_boxes >>> self = BBoxHead(reg_class_agnostic=True) >>> n_roi = 2 >>> n_img = 4 >>> scale = 512 >>> rng = np.random.RandomState(0) >>> img_metas = [{'img_shape': (scale, scale)} ... for _ in range(n_img)] >>> # Create rois in the expected format >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng) >>> img_ids = torch.randint(0, n_img, (n_roi,)) >>> img_ids = img_ids.float() >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1) >>> # Create other args >>> labels = torch.randint(0, 2, (n_roi,)).long() >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng) >>> # For each image, pretend random positive boxes are gts >>> is_label_pos = (labels.numpy() > 0).astype(np.int) >>> lbl_per_img = kwarray.group_items(is_label_pos, ... img_ids.numpy()) >>> pos_per_img = [sum(lbl_per_img.get(gid, [])) ... 
for gid in range(n_img)] >>> pos_is_gts = [ >>> torch.randint(0, 2, (npos,)).byte().sort( >>> descending=True)[0] >>> for npos in pos_per_img >>> ] >>> bboxes_list = self.refine_bboxes(rois, labels, bbox_preds, >>> pos_is_gts, img_metas) >>> print(bboxes_list) """ img_ids = rois[:, 0].long().unique(sorted=True) assert img_ids.numel() <= len(img_metas) bboxes_list = [] for i in range(len(img_metas)): inds = torch.nonzero( rois[:, 0] == i, as_tuple=False).squeeze(dim=1) num_rois = inds.numel() bboxes_ = rois[inds, 1:] label_ = labels[inds] bbox_pred_ = bbox_preds[inds] img_meta_ = img_metas[i] pos_is_gts_ = pos_is_gts[i] bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_, img_meta_) # filter gt bboxes pos_keep = 1 - pos_is_gts_ keep_inds = pos_is_gts_.new_ones(num_rois) keep_inds[:len(pos_is_gts_)] = pos_keep bboxes_list.append(bboxes[keep_inds.type(torch.bool)]) return bboxes_list @force_fp32(apply_to=('bbox_pred', )) def regress_by_class(self, rois, label, bbox_pred, img_meta): """Regress the bbox for the predicted class. Used in Cascade R-CNN. Args: rois (Tensor): Rois from `rpn_head` or last stage `bbox_head`, has shape (num_proposals, 4) or (num_proposals, 5). label (Tensor): Only used when `self.reg_class_agnostic` is False, has shape (num_proposals, ). bbox_pred (Tensor): Regression prediction of current stage `bbox_head`. When `self.reg_class_agnostic` is False, it has shape (n, num_classes * 4), otherwise it has shape (n, 4). img_meta (dict): Image meta info. Returns: Tensor: Regressed bboxes, the same shape as input rois. """ assert rois.size(1) == 4 or rois.size(1) == 5, repr(rois.shape) if not self.reg_class_agnostic: label = label * 4 inds = torch.stack((label, label + 1, label + 2, label + 3), 1) bbox_pred = torch.gather(bbox_pred, 1, inds) assert bbox_pred.size(1) == 4 max_shape = img_meta['img_shape'] if rois.size(1) == 4: new_rois = self.bbox_coder.decode( rois, bbox_pred, max_shape=max_shape) else: bboxes = self.bbox_coder.decode( rois[:, 1:], bbox_pred, max_shape=max_shape) new_rois = torch.cat((rois[:, [0]], bboxes), dim=1) return new_rois def onnx_export(self, rois, cls_score, bbox_pred, img_shape, cfg=None, **kwargs): """Transform network output for a batch into bbox predictions. Args: rois (Tensor): Boxes to be transformed. Has shape (B, num_boxes, 5) cls_score (Tensor): Box scores. has shape (B, num_boxes, num_classes + 1), 1 represent the background. bbox_pred (Tensor, optional): Box energies / deltas for, has shape (B, num_boxes, num_classes * 4) when. img_shape (torch.Tensor): Shape of image. cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None Returns: tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] and class labels of shape [N, num_det]. """ assert rois.ndim == 3, 'Only support export two stage ' \ 'model to ONNX ' \ 'with batch dimension. 
' if self.custom_cls_channels: scores = self.loss_cls.get_activation(cls_score) else: scores = F.softmax( cls_score, dim=-1) if cls_score is not None else None if bbox_pred is not None: bboxes = self.bbox_coder.decode( rois[..., 1:], bbox_pred, max_shape=img_shape) else: bboxes = rois[..., 1:].clone() if img_shape is not None: max_shape = bboxes.new_tensor(img_shape)[..., :2] min_xy = bboxes.new_tensor(0) max_xy = torch.cat( [max_shape] * 2, dim=-1).flip(-1).unsqueeze(-2) bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) # Replace multiclass_nms with ONNX::NonMaxSuppression in deployment from mmdet.core.export import add_dummy_nms_for_onnx max_output_boxes_per_class = cfg.nms.get('max_output_boxes_per_class', cfg.max_per_img) iou_threshold = cfg.nms.get('iou_threshold', 0.5) score_threshold = cfg.score_thr nms_pre = cfg.get('deploy_nms_pre', -1) scores = scores[..., :self.num_classes] if self.reg_class_agnostic: return add_dummy_nms_for_onnx( bboxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, pre_top_k=nms_pre, after_top_k=cfg.max_per_img) else: batch_size = scores.shape[0] labels = torch.arange( self.num_classes, dtype=torch.long).to(scores.device) labels = labels.view(1, 1, -1).expand_as(scores) labels = labels.reshape(batch_size, -1) scores = scores.reshape(batch_size, -1) bboxes = bboxes.reshape(batch_size, -1, 4) max_size = torch.max(img_shape) # Offset bboxes of each class so that bboxes of different labels # do not overlap. offsets = (labels * max_size + 1).unsqueeze(2) bboxes_for_nms = bboxes + offsets batch_dets, labels = add_dummy_nms_for_onnx( bboxes_for_nms, scores.unsqueeze(2), max_output_boxes_per_class, iou_threshold, score_threshold, pre_top_k=nms_pre, after_top_k=cfg.max_per_img, labels=labels) # Offset the bboxes back after dummy nms. offsets = (labels * max_size + 1).unsqueeze(2) # Indexing + inplace operation fails with dynamic shape in ONNX # original style: batch_dets[..., :4] -= offsets bboxes, scores = batch_dets[..., 0:4], batch_dets[..., 4:5] bboxes -= offsets batch_dets = torch.cat([bboxes, scores], dim=2) return batch_dets, labels ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmdet.models.builder import HEADS from mmdet.models.utils import build_linear_layer from .bbox_head import BBoxHead @HEADS.register_module() class ConvFCBBoxHead(BBoxHead): r"""More general bbox head, with shared conv and fc layers and two optional separated branches. .. 
code-block:: none

                                    /-> cls convs -> cls fcs -> cls
        shared convs -> shared fcs
                                    \-> reg convs -> reg fcs -> reg
    """  # noqa: W605

    def __init__(self,
                 num_shared_convs=0,
                 num_shared_fcs=0,
                 num_cls_convs=0,
                 num_cls_fcs=0,
                 num_reg_convs=0,
                 num_reg_fcs=0,
                 conv_out_channels=256,
                 fc_out_channels=1024,
                 conv_cfg=None,
                 norm_cfg=None,
                 init_cfg=None,
                 *args,
                 **kwargs):
        super(ConvFCBBoxHead, self).__init__(
            *args, init_cfg=init_cfg, **kwargs)
        assert (num_shared_convs + num_shared_fcs + num_cls_convs +
                num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
        if num_cls_convs > 0 or num_reg_convs > 0:
            assert num_shared_fcs == 0
        if not self.with_cls:
            assert num_cls_convs == 0 and num_cls_fcs == 0
        if not self.with_reg:
            assert num_reg_convs == 0 and num_reg_fcs == 0
        self.num_shared_convs = num_shared_convs
        self.num_shared_fcs = num_shared_fcs
        self.num_cls_convs = num_cls_convs
        self.num_cls_fcs = num_cls_fcs
        self.num_reg_convs = num_reg_convs
        self.num_reg_fcs = num_reg_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg

        # add shared convs and fcs
        self.shared_convs, self.shared_fcs, last_layer_dim = \
            self._add_conv_fc_branch(
                self.num_shared_convs, self.num_shared_fcs, self.in_channels,
                True)
        self.shared_out_channels = last_layer_dim

        # add cls specific branch
        self.cls_convs, self.cls_fcs, self.cls_last_dim = \
            self._add_conv_fc_branch(
                self.num_cls_convs, self.num_cls_fcs,
                self.shared_out_channels)

        # add reg specific branch
        self.reg_convs, self.reg_fcs, self.reg_last_dim = \
            self._add_conv_fc_branch(
                self.num_reg_convs, self.num_reg_fcs,
                self.shared_out_channels)

        if self.num_shared_fcs == 0 and not self.with_avg_pool:
            if self.num_cls_fcs == 0:
                self.cls_last_dim *= self.roi_feat_area
            if self.num_reg_fcs == 0:
                self.reg_last_dim *= self.roi_feat_area

        self.relu = nn.ReLU(inplace=True)
        # reconstruct fc_cls and fc_reg since input channels are changed
        if self.with_cls:
            if self.custom_cls_channels:
                cls_channels = self.loss_cls.get_cls_channels(
                    self.num_classes)
            else:
                cls_channels = self.num_classes + 1
            self.fc_cls = build_linear_layer(
                self.cls_predictor_cfg,
                in_features=self.cls_last_dim,
                out_features=cls_channels)
        if self.with_reg:
            out_dim_reg = (4 if self.reg_class_agnostic else 4 *
                           self.num_classes)
            self.fc_reg = build_linear_layer(
                self.reg_predictor_cfg,
                in_features=self.reg_last_dim,
                out_features=out_dim_reg)

        if init_cfg is None:
            # when init_cfg is None,
            # It has been set to
            # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))],
            #  [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))]
            # after `super(ConvFCBBoxHead, self).__init__()`
            # we only need to append additional configuration
            # for `shared_fcs`, `cls_fcs` and `reg_fcs`
            self.init_cfg += [
                dict(
                    type='Xavier',
                    distribution='uniform',
                    override=[
                        dict(name='shared_fcs'),
                        dict(name='cls_fcs'),
                        dict(name='reg_fcs')
                    ])
            ]
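    # How the stock subclasses at the bottom of this file wire the branches
    # (illustrative numbers, assuming the default in_channels=256 and
    # roi_feat_size=7):
    #   Shared2FCBBoxHead:      num_shared_fcs=2
    #       flatten 256*7*7=12544 -> fc 1024 -> fc 1024 -> fc_cls / fc_reg
    #   Shared4Conv1FCBBoxHead: num_shared_convs=4, num_shared_fcs=1
    #       4 x (3x3 conv, 256 ch) -> flatten -> fc 1024 -> fc_cls / fc_reg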
    def _add_conv_fc_branch(self,
                            num_branch_convs,
                            num_branch_fcs,
                            in_channels,
                            is_shared=False):
        """Add shared or separable branch.

        convs -> avg pool (optional) -> fcs
        """
        last_layer_dim = in_channels
        # add branch specific conv layers
        branch_convs = nn.ModuleList()
        if num_branch_convs > 0:
            for i in range(num_branch_convs):
                conv_in_channels = (
                    last_layer_dim if i == 0 else self.conv_out_channels)
                branch_convs.append(
                    ConvModule(
                        conv_in_channels,
                        self.conv_out_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
            last_layer_dim = self.conv_out_channels
        # add branch specific fc layers
        branch_fcs = nn.ModuleList()
        if num_branch_fcs > 0:
            # for shared branch, only consider self.with_avg_pool
            # for separated branches, also consider self.num_shared_fcs
            if (is_shared
                    or self.num_shared_fcs == 0) and not self.with_avg_pool:
                last_layer_dim *= self.roi_feat_area
            for i in range(num_branch_fcs):
                fc_in_channels = (
                    last_layer_dim if i == 0 else self.fc_out_channels)
                branch_fcs.append(
                    nn.Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        return branch_convs, branch_fcs, last_layer_dim

    def forward(self, x):
        # shared part
        if self.num_shared_convs > 0:
            for conv in self.shared_convs:
                x = conv(x)

        if self.num_shared_fcs > 0:
            if self.with_avg_pool:
                x = self.avg_pool(x)

            x = x.flatten(1)

            for fc in self.shared_fcs:
                x = self.relu(fc(x))
        # separate branches
        x_cls = x
        x_reg = x

        for conv in self.cls_convs:
            x_cls = conv(x_cls)
        if x_cls.dim() > 2:
            if self.with_avg_pool:
                x_cls = self.avg_pool(x_cls)
            x_cls = x_cls.flatten(1)
        for fc in self.cls_fcs:
            x_cls = self.relu(fc(x_cls))

        for conv in self.reg_convs:
            x_reg = conv(x_reg)
        if x_reg.dim() > 2:
            if self.with_avg_pool:
                x_reg = self.avg_pool(x_reg)
            x_reg = x_reg.flatten(1)
        for fc in self.reg_fcs:
            x_reg = self.relu(fc(x_reg))

        cls_score = self.fc_cls(x_cls) if self.with_cls else None
        bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
        return cls_score, bbox_pred


@HEADS.register_module()
class Shared2FCBBoxHead(ConvFCBBoxHead):

    def __init__(self, fc_out_channels=1024, *args, **kwargs):
        super(Shared2FCBBoxHead, self).__init__(
            num_shared_convs=0,
            num_shared_fcs=2,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0,
            fc_out_channels=fc_out_channels,
            *args,
            **kwargs)


@HEADS.register_module()
class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):

    def __init__(self, fc_out_channels=1024, *args, **kwargs):
        super(Shared4Conv1FCBBoxHead, self).__init__(
            num_shared_convs=4,
            num_shared_fcs=1,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0,
            fc_out_channels=fc_out_channels,
            *args,
            **kwargs)
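
To make the shared-branch wiring above concrete before moving on to the DII head, here is a small smoke test of the two-FC variant. It is an illustrative sketch, not repository code, and assumes an environment with `mmdet` installed:

    import torch

    from mmdet.models.roi_heads.bbox_heads import Shared2FCBBoxHead

    head = Shared2FCBBoxHead(in_channels=256, roi_feat_size=7, num_classes=80)
    feats = torch.randn(16, 256, 7, 7)  # RoI-aligned features, 16 proposals
    cls_score, bbox_pred = head(feats)
    assert cls_score.shape == (16, 81)   # 80 classes + 1 background
    assert bbox_pred.shape == (16, 320)  # 4 deltas per class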


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/bbox_heads/dii_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import (bias_init_with_prob, build_activation_layer,
                      build_norm_layer)
from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention
from mmcv.runner import auto_fp16, force_fp32

from mmdet.core import multi_apply
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.dense_heads.atss_head import reduce_mean
from mmdet.models.losses import accuracy
from mmdet.models.utils import build_transformer
from .bbox_head import BBoxHead


@HEADS.register_module()
class DIIHead(BBoxHead):
    r"""Dynamic Instance Interactive Head for `Sparse R-CNN: End-to-End
    Object Detection with Learnable Proposals
    <https://arxiv.org/abs/2011.12450>`_

    Args:
        num_classes (int): Number of classes in the dataset.
            Defaults to 80.
        num_ffn_fcs (int): The number of fully-connected
            layers in FFNs. Defaults to 2.
        num_heads (int): The number of heads in
            MultiheadAttention. Defaults to 8.
        num_cls_fcs (int): The number of fully-connected
            layers in classification subnet. Defaults to 1.
        num_reg_fcs (int): The number of fully-connected
            layers in regression subnet. Defaults to 3.
        feedforward_channels (int): The hidden dimension
            of FFNs. Defaults to 2048.
        in_channels (int): Hidden_channels of MultiheadAttention.
            Defaults to 256.
        dropout (float): Probability of dropping the channel.
            Defaults to 0.0.
        ffn_act_cfg (dict): The activation config for FFNs.
        dynamic_conv_cfg (dict): The convolution config
            for DynamicConv.
        loss_iou (dict): The config for iou or giou loss.
    """

    def __init__(self,
                 num_classes=80,
                 num_ffn_fcs=2,
                 num_heads=8,
                 num_cls_fcs=1,
                 num_reg_fcs=3,
                 feedforward_channels=2048,
                 in_channels=256,
                 dropout=0.0,
                 ffn_act_cfg=dict(type='ReLU', inplace=True),
                 dynamic_conv_cfg=dict(
                     type='DynamicConv',
                     in_channels=256,
                     feat_channels=64,
                     out_channels=256,
                     input_feat_shape=7,
                     act_cfg=dict(type='ReLU', inplace=True),
                     norm_cfg=dict(type='LN')),
                 loss_iou=dict(type='GIoULoss', loss_weight=2.0),
                 init_cfg=None,
                 **kwargs):
        assert init_cfg is None, 'To prevent abnormal initialization ' \
            'behavior, init_cfg is not allowed to be set'
        super(DIIHead, self).__init__(
            num_classes=num_classes,
            reg_decoded_bbox=True,
            reg_class_agnostic=True,
            init_cfg=init_cfg,
            **kwargs)
        self.loss_iou = build_loss(loss_iou)
        self.in_channels = in_channels
        self.fp16_enabled = False
        self.attention = MultiheadAttention(in_channels, num_heads, dropout)
        self.attention_norm = build_norm_layer(dict(type='LN'),
                                               in_channels)[1]

        self.instance_interactive_conv = build_transformer(dynamic_conv_cfg)
        self.instance_interactive_conv_dropout = nn.Dropout(dropout)
        self.instance_interactive_conv_norm = build_norm_layer(
            dict(type='LN'), in_channels)[1]

        self.ffn = FFN(
            in_channels,
            feedforward_channels,
            num_ffn_fcs,
            act_cfg=ffn_act_cfg,
            dropout=dropout)
        self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1]

        self.cls_fcs = nn.ModuleList()
        for _ in range(num_cls_fcs):
            self.cls_fcs.append(
                nn.Linear(in_channels, in_channels, bias=False))
            self.cls_fcs.append(
                build_norm_layer(dict(type='LN'), in_channels)[1])
            self.cls_fcs.append(
                build_activation_layer(dict(type='ReLU', inplace=True)))

        # over load the self.fc_cls in BBoxHead
        if self.loss_cls.use_sigmoid:
            self.fc_cls = nn.Linear(in_channels, self.num_classes)
        else:
            self.fc_cls = nn.Linear(in_channels, self.num_classes + 1)

        self.reg_fcs = nn.ModuleList()
        for _ in range(num_reg_fcs):
            self.reg_fcs.append(
                nn.Linear(in_channels, in_channels, bias=False))
            self.reg_fcs.append(
                build_norm_layer(dict(type='LN'), in_channels)[1])
            self.reg_fcs.append(
                build_activation_layer(dict(type='ReLU', inplace=True)))
        # over load the self.fc_reg in BBoxHead
        self.fc_reg = nn.Linear(in_channels, 4)

        assert self.reg_class_agnostic, 'DIIHead only ' \
            'supports `reg_class_agnostic=True` '
        assert self.reg_decoded_bbox, 'DIIHead only ' \
            'supports `reg_decoded_bbox=True`'

    def init_weights(self):
        """Use xavier initialization for all weight parameter and set
        classification head bias as a specific value when use focal loss."""
        super(DIIHead, self).init_weights()
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
            else:
                # adopt the default initialization for
                # the weight and bias of the layer norm
                pass
        if self.loss_cls.use_sigmoid:
            bias_init = bias_init_with_prob(0.01)
            nn.init.constant_(self.fc_cls.bias, bias_init)

    @auto_fp16()
    def forward(self, roi_feat, proposal_feat):
        """Forward function of Dynamic Instance Interactive Head.
Args: roi_feat (Tensor): Roi-pooling features with shape (batch_size*num_proposals, feature_dimensions, pooling_h , pooling_w). proposal_feat (Tensor): Intermediate feature get from diihead in last stage, has shape (batch_size, num_proposals, feature_dimensions) Returns: tuple[Tensor]: Usually a tuple of classification scores and bbox prediction and a intermediate feature. - cls_scores (Tensor): Classification scores for all proposals, has shape (batch_size, num_proposals, num_classes). - bbox_preds (Tensor): Box energies / deltas for all proposals, has shape (batch_size, num_proposals, 4). - obj_feat (Tensor): Object feature before classification and regression subnet, has shape (batch_size, num_proposal, feature_dimensions). """ N, num_proposals = proposal_feat.shape[:2] # Self attention proposal_feat = proposal_feat.permute(1, 0, 2) proposal_feat = self.attention_norm(self.attention(proposal_feat)) attn_feats = proposal_feat.permute(1, 0, 2) # instance interactive proposal_feat = attn_feats.reshape(-1, self.in_channels) proposal_feat_iic = self.instance_interactive_conv( proposal_feat, roi_feat) proposal_feat = proposal_feat + self.instance_interactive_conv_dropout( proposal_feat_iic) obj_feat = self.instance_interactive_conv_norm(proposal_feat) # FFN obj_feat = self.ffn_norm(self.ffn(obj_feat)) cls_feat = obj_feat reg_feat = obj_feat for cls_layer in self.cls_fcs: cls_feat = cls_layer(cls_feat) for reg_layer in self.reg_fcs: reg_feat = reg_layer(reg_feat) cls_score = self.fc_cls(cls_feat).view( N, num_proposals, self.num_classes if self.loss_cls.use_sigmoid else self.num_classes + 1) bbox_delta = self.fc_reg(reg_feat).view(N, num_proposals, 4) return cls_score, bbox_delta, obj_feat.view( N, num_proposals, self.in_channels), attn_feats @force_fp32(apply_to=('cls_score', 'bbox_pred')) def loss(self, cls_score, bbox_pred, labels, label_weights, bbox_targets, bbox_weights, imgs_whwh=None, reduction_override=None, **kwargs): """"Loss function of DIIHead, get loss of all images. Args: cls_score (Tensor): Classification prediction results of all class, has shape (batch_size * num_proposals_single_image, num_classes) bbox_pred (Tensor): Regression prediction results, has shape (batch_size * num_proposals_single_image, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. labels (Tensor): Label of each proposals, has shape (batch_size * num_proposals_single_image label_weights (Tensor): Classification loss weight of each proposals, has shape (batch_size * num_proposals_single_image bbox_targets (Tensor): Regression targets of each proposals, has shape (batch_size * num_proposals_single_image, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. bbox_weights (Tensor): Regression loss weight of each proposals's coordinate, has shape (batch_size * num_proposals_single_image, 4), imgs_whwh (Tensor): imgs_whwh (Tensor): Tensor with\ shape (batch_size, num_proposals, 4), the last dimension means [img_width,img_height, img_width, img_height]. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Options are "none", "mean" and "sum". 
Defaults to None, Returns: dict[str, Tensor]: Dictionary of loss components """ losses = dict() bg_class_ind = self.num_classes # note in spare rcnn num_gt == num_pos pos_inds = (labels >= 0) & (labels < bg_class_ind) num_pos = pos_inds.sum().float() avg_factor = reduce_mean(num_pos) if cls_score is not None: if cls_score.numel() > 0: losses['loss_cls'] = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor, reduction_override=reduction_override) losses['pos_acc'] = accuracy(cls_score[pos_inds], labels[pos_inds]) if bbox_pred is not None: # 0~self.num_classes-1 are FG, self.num_classes is BG # do not perform bounding box regression for BG anymore. if pos_inds.any(): pos_bbox_pred = bbox_pred.reshape(bbox_pred.size(0), 4)[pos_inds.type(torch.bool)] imgs_whwh = imgs_whwh.reshape(bbox_pred.size(0), 4)[pos_inds.type(torch.bool)] losses['loss_bbox'] = self.loss_bbox( pos_bbox_pred / imgs_whwh, bbox_targets[pos_inds.type(torch.bool)] / imgs_whwh, bbox_weights[pos_inds.type(torch.bool)], avg_factor=avg_factor) losses['loss_iou'] = self.loss_iou( pos_bbox_pred, bbox_targets[pos_inds.type(torch.bool)], bbox_weights[pos_inds.type(torch.bool)], avg_factor=avg_factor) else: losses['loss_bbox'] = bbox_pred.sum() * 0 losses['loss_iou'] = bbox_pred.sum() * 0 return losses def _get_target_single(self, pos_inds, neg_inds, pos_bboxes, neg_bboxes, pos_gt_bboxes, pos_gt_labels, cfg): """Calculate the ground truth for proposals in the single image according to the sampling results. Almost the same as the implementation in `bbox_head`, we add pos_inds and neg_inds to select positive and negative samples instead of selecting the first num_pos as positive samples. Args: pos_inds (Tensor): The length is equal to the positive sample numbers contain all index of the positive sample in the origin proposal set. neg_inds (Tensor): The length is equal to the negative sample numbers contain all index of the negative sample in the origin proposal set. pos_bboxes (Tensor): Contains all the positive boxes, has shape (num_pos, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. neg_bboxes (Tensor): Contains all the negative boxes, has shape (num_neg, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. pos_gt_bboxes (Tensor): Contains gt_boxes for all positive samples, has shape (num_pos, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. pos_gt_labels (Tensor): Contains gt_labels for all positive samples, has shape (num_pos, ). cfg (obj:`ConfigDict`): `train_cfg` of R-CNN. Returns: Tuple[Tensor]: Ground truth for proposals in a single image. Containing the following Tensors: - labels(Tensor): Gt_labels for all proposals, has shape (num_proposals,). - label_weights(Tensor): Labels_weights for all proposals, has shape (num_proposals,). - bbox_targets(Tensor):Regression target for all proposals, has shape (num_proposals, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - bbox_weights(Tensor):Regression weights for all proposals, has shape (num_proposals, 4). 
""" num_pos = pos_bboxes.size(0) num_neg = neg_bboxes.size(0) num_samples = num_pos + num_neg # original implementation uses new_zeros since BG are set to be 0 # now use empty & fill because BG cat_id = num_classes, # FG cat_id = [0, num_classes-1] labels = pos_bboxes.new_full((num_samples, ), self.num_classes, dtype=torch.long) label_weights = pos_bboxes.new_zeros(num_samples) bbox_targets = pos_bboxes.new_zeros(num_samples, 4) bbox_weights = pos_bboxes.new_zeros(num_samples, 4) if num_pos > 0: labels[pos_inds] = pos_gt_labels pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight label_weights[pos_inds] = pos_weight if not self.reg_decoded_bbox: pos_bbox_targets = self.bbox_coder.encode( pos_bboxes, pos_gt_bboxes) else: pos_bbox_targets = pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1 if num_neg > 0: label_weights[neg_inds] = 1.0 return labels, label_weights, bbox_targets, bbox_weights def get_targets(self, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg, concat=True): """Calculate the ground truth for all samples in a batch according to the sampling_results. Almost the same as the implementation in bbox_head, we passed additional parameters pos_inds_list and neg_inds_list to `_get_target_single` function. Args: sampling_results (List[obj:SamplingResults]): Assign results of all images in a batch after sampling. gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch, each tensor has shape (num_gt, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. gt_labels (list[Tensor]): Gt_labels of all images in a batch, each tensor has shape (num_gt,). rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN. concat (bool): Whether to concatenate the results of all the images in a single batch. Returns: Tuple[Tensor]: Ground truth for proposals in a single image. Containing the following list of Tensors: - labels (list[Tensor],Tensor): Gt_labels for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - label_weights (list[Tensor]): Labels_weights for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - bbox_targets (list[Tensor],Tensor): Regression target for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - bbox_weights (list[tensor],Tensor): Regression weights for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4). 
""" pos_inds_list = [res.pos_inds for res in sampling_results] neg_inds_list = [res.neg_inds for res in sampling_results] pos_bboxes_list = [res.pos_bboxes for res in sampling_results] neg_bboxes_list = [res.neg_bboxes for res in sampling_results] pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results] pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results] labels, label_weights, bbox_targets, bbox_weights = multi_apply( self._get_target_single, pos_inds_list, neg_inds_list, pos_bboxes_list, neg_bboxes_list, pos_gt_bboxes_list, pos_gt_labels_list, cfg=rcnn_train_cfg) if concat: labels = torch.cat(labels, 0) label_weights = torch.cat(label_weights, 0) bbox_targets = torch.cat(bbox_targets, 0) bbox_weights = torch.cat(bbox_weights, 0) return labels, label_weights, bbox_targets, bbox_weights ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, ModuleList from mmdet.models.backbones.resnet import Bottleneck from mmdet.models.builder import HEADS from .bbox_head import BBoxHead class BasicResBlock(BaseModule): """Basic residual block. This block is a little different from the block in the ResNet backbone. The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock. Args: in_channels (int): Channels of the input feature map. out_channels (int): Channels of the output feature map. conv_cfg (dict): The config dict for convolution layers. norm_cfg (dict): The config dict for normalization layers. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN'), init_cfg=None): super(BasicResBlock, self).__init__(init_cfg) # main path self.conv1 = ConvModule( in_channels, in_channels, kernel_size=3, padding=1, bias=False, conv_cfg=conv_cfg, norm_cfg=norm_cfg) self.conv2 = ConvModule( in_channels, out_channels, kernel_size=1, bias=False, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None) # identity path self.conv_identity = ConvModule( in_channels, out_channels, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None) self.relu = nn.ReLU(inplace=True) def forward(self, x): identity = x x = self.conv1(x) x = self.conv2(x) identity = self.conv_identity(identity) out = x + identity out = self.relu(out) return out @HEADS.register_module() class DoubleConvFCBBoxHead(BBoxHead): r"""Bbox head used in Double-Head R-CNN .. 
code-block:: none /-> cls /-> shared convs -> \-> reg roi features /-> cls \-> shared fc -> \-> reg """ # noqa: W605 def __init__(self, num_convs=0, num_fcs=0, conv_out_channels=1024, fc_out_channels=1024, conv_cfg=None, norm_cfg=dict(type='BN'), init_cfg=dict( type='Normal', override=[ dict(type='Normal', name='fc_cls', std=0.01), dict(type='Normal', name='fc_reg', std=0.001), dict( type='Xavier', name='fc_branch', distribution='uniform') ]), **kwargs): kwargs.setdefault('with_avg_pool', True) super(DoubleConvFCBBoxHead, self).__init__(init_cfg=init_cfg, **kwargs) assert self.with_avg_pool assert num_convs > 0 assert num_fcs > 0 self.num_convs = num_convs self.num_fcs = num_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg # increase the channel of input features self.res_block = BasicResBlock(self.in_channels, self.conv_out_channels) # add conv heads self.conv_branch = self._add_conv_branch() # add fc heads self.fc_branch = self._add_fc_branch() out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg) self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1) self.relu = nn.ReLU(inplace=True) def _add_conv_branch(self): """Add the fc branch which consists of a sequential of conv layers.""" branch_convs = ModuleList() for i in range(self.num_convs): branch_convs.append( Bottleneck( inplanes=self.conv_out_channels, planes=self.conv_out_channels // 4, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) return branch_convs def _add_fc_branch(self): """Add the fc branch which consists of a sequential of fc layers.""" branch_fcs = ModuleList() for i in range(self.num_fcs): fc_in_channels = ( self.in_channels * self.roi_feat_area if i == 0 else self.fc_out_channels) branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels)) return branch_fcs def forward(self, x_cls, x_reg): # conv head x_conv = self.res_block(x_reg) for conv in self.conv_branch: x_conv = conv(x_conv) if self.with_avg_pool: x_conv = self.avg_pool(x_conv) x_conv = x_conv.view(x_conv.size(0), -1) bbox_pred = self.fc_reg(x_conv) # fc head x_fc = x_cls.view(x_cls.size(0), -1) for fc in self.fc_branch: x_fc = self.relu(fc(x_fc)) cls_score = self.fc_cls(x_fc) return cls_score, bbox_pred ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/bbox_heads/sabl_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, force_fp32 from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms from mmdet.models.builder import HEADS, build_loss from mmdet.models.losses import accuracy @HEADS.register_module() class SABLHead(BaseModule): """Side-Aware Boundary Localization (SABL) for RoI-Head. Side-Aware features are extracted by conv layers with an attention mechanism. Boundary Localization with Bucketing and Bucketing Guided Rescoring are implemented in BucketingBBoxCoder. Please refer to https://arxiv.org/abs/1912.04260 for more details. Args: cls_in_channels (int): Input channels of cls RoI feature. \ Defaults to 256. reg_in_channels (int): Input channels of reg RoI feature. \ Defaults to 256. roi_feat_size (int): Size of RoI features. Defaults to 7. reg_feat_up_ratio (int): Upsample ratio of reg features. 
\ Defaults to 2. reg_pre_kernel (int): Kernel of 2D conv layers before \ attention pooling. Defaults to 3. reg_post_kernel (int): Kernel of 1D conv layers after \ attention pooling. Defaults to 3. reg_pre_num (int): Number of pre convs. Defaults to 2. reg_post_num (int): Number of post convs. Defaults to 1. num_classes (int): Number of classes in dataset. Defaults to 80. cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024. reg_offset_out_channels (int): Hidden and output channel \ of reg offset branch. Defaults to 256. reg_cls_out_channels (int): Hidden and output channel \ of reg cls branch. Defaults to 256. num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1. num_reg_fcs (int): Number of fcs for reg branch.. Defaults to 0. reg_class_agnostic (bool): Class agnostic regression or not. \ Defaults to True. norm_cfg (dict): Config of norm layers. Defaults to None. bbox_coder (dict): Config of bbox coder. Defaults 'BucketingBBoxCoder'. loss_cls (dict): Config of classification loss. loss_bbox_cls (dict): Config of classification loss for bbox branch. loss_bbox_reg (dict): Config of regression loss for bbox branch. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, num_classes, cls_in_channels=256, reg_in_channels=256, roi_feat_size=7, reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3, reg_pre_num=2, reg_post_num=1, cls_out_channels=1024, reg_offset_out_channels=256, reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0, reg_class_agnostic=True, norm_cfg=None, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox_reg=dict( type='SmoothL1Loss', beta=0.1, loss_weight=1.0), init_cfg=None): super(SABLHead, self).__init__(init_cfg) self.cls_in_channels = cls_in_channels self.reg_in_channels = reg_in_channels self.roi_feat_size = roi_feat_size self.reg_feat_up_ratio = int(reg_feat_up_ratio) self.num_buckets = bbox_coder['num_buckets'] assert self.reg_feat_up_ratio // 2 >= 1 self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio assert self.up_reg_feat_size == bbox_coder['num_buckets'] self.reg_pre_kernel = reg_pre_kernel self.reg_post_kernel = reg_post_kernel self.reg_pre_num = reg_pre_num self.reg_post_num = reg_post_num self.num_classes = num_classes self.cls_out_channels = cls_out_channels self.reg_offset_out_channels = reg_offset_out_channels self.reg_cls_out_channels = reg_cls_out_channels self.num_cls_fcs = num_cls_fcs self.num_reg_fcs = num_reg_fcs self.reg_class_agnostic = reg_class_agnostic assert self.reg_class_agnostic self.norm_cfg = norm_cfg self.bbox_coder = build_bbox_coder(bbox_coder) self.loss_cls = build_loss(loss_cls) self.loss_bbox_cls = build_loss(loss_bbox_cls) self.loss_bbox_reg = build_loss(loss_bbox_reg) self.cls_fcs = self._add_fc_branch(self.num_cls_fcs, self.cls_in_channels, self.roi_feat_size, self.cls_out_channels) self.side_num = int(np.ceil(self.num_buckets / 2)) if self.reg_feat_up_ratio > 1: self.upsample_x = nn.ConvTranspose1d( reg_in_channels, reg_in_channels, self.reg_feat_up_ratio, stride=self.reg_feat_up_ratio) self.upsample_y = nn.ConvTranspose1d( reg_in_channels, reg_in_channels, self.reg_feat_up_ratio, stride=self.reg_feat_up_ratio) self.reg_pre_convs = nn.ModuleList() for i in range(self.reg_pre_num): reg_pre_conv = ConvModule( reg_in_channels, reg_in_channels, 
kernel_size=reg_pre_kernel, padding=reg_pre_kernel // 2, norm_cfg=norm_cfg, act_cfg=dict(type='ReLU')) self.reg_pre_convs.append(reg_pre_conv) self.reg_post_conv_xs = nn.ModuleList() for i in range(self.reg_post_num): reg_post_conv_x = ConvModule( reg_in_channels, reg_in_channels, kernel_size=(1, reg_post_kernel), padding=(0, reg_post_kernel // 2), norm_cfg=norm_cfg, act_cfg=dict(type='ReLU')) self.reg_post_conv_xs.append(reg_post_conv_x) self.reg_post_conv_ys = nn.ModuleList() for i in range(self.reg_post_num): reg_post_conv_y = ConvModule( reg_in_channels, reg_in_channels, kernel_size=(reg_post_kernel, 1), padding=(reg_post_kernel // 2, 0), norm_cfg=norm_cfg, act_cfg=dict(type='ReLU')) self.reg_post_conv_ys.append(reg_post_conv_y) self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1) self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1) self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1) self.relu = nn.ReLU(inplace=True) self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs, self.reg_in_channels, 1, self.reg_cls_out_channels) self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs, self.reg_in_channels, 1, self.reg_offset_out_channels) self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1) self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1) if init_cfg is None: self.init_cfg = [ dict( type='Xavier', layer='Linear', distribution='uniform', override=[ dict(type='Normal', name='reg_conv_att_x', std=0.01), dict(type='Normal', name='reg_conv_att_y', std=0.01), dict(type='Normal', name='fc_reg_cls', std=0.01), dict(type='Normal', name='fc_cls', std=0.01), dict(type='Normal', name='fc_reg_offset', std=0.001) ]) ] if self.reg_feat_up_ratio > 1: self.init_cfg += [ dict( type='Kaiming', distribution='normal', override=[ dict(name='upsample_x'), dict(name='upsample_y') ]) ] @property def custom_cls_channels(self): return getattr(self.loss_cls, 'custom_cls_channels', False) @property def custom_activation(self): return getattr(self.loss_cls, 'custom_activation', False) @property def custom_accuracy(self): return getattr(self.loss_cls, 'custom_accuracy', False) def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size, fc_out_channels): in_channels = in_channels * roi_feat_size * roi_feat_size branch_fcs = nn.ModuleList() for i in range(num_branch_fcs): fc_in_channels = (in_channels if i == 0 else fc_out_channels) branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels)) return branch_fcs def cls_forward(self, cls_x): cls_x = cls_x.view(cls_x.size(0), -1) for fc in self.cls_fcs: cls_x = self.relu(fc(cls_x)) cls_score = self.fc_cls(cls_x) return cls_score def attention_pool(self, reg_x): """Extract direction-specific features fx and fy with an attention mechanism.""" reg_fx = reg_x reg_fy = reg_x reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid() reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid() reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2) reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3) reg_fx = (reg_fx * reg_fx_att).sum(dim=2) reg_fy = (reg_fy * reg_fy_att).sum(dim=3) return reg_fx, reg_fy def side_aware_feature_extractor(self, reg_x): """Refine and extract side-aware features without splitting them.""" for reg_pre_conv in self.reg_pre_convs: reg_x = reg_pre_conv(reg_x) reg_fx, reg_fy = self.attention_pool(reg_x) if self.reg_post_num > 0: reg_fx = reg_fx.unsqueeze(2) reg_fy = reg_fy.unsqueeze(3) for i in range(self.reg_post_num): reg_fx = self.reg_post_conv_xs[i](reg_fx) reg_fy = self.reg_post_conv_ys[i](reg_fy) reg_fx =
reg_fx.squeeze(2) reg_fy = reg_fy.squeeze(3) if self.reg_feat_up_ratio > 1: reg_fx = self.relu(self.upsample_x(reg_fx)) reg_fy = self.relu(self.upsample_y(reg_fy)) reg_fx = torch.transpose(reg_fx, 1, 2) reg_fy = torch.transpose(reg_fy, 1, 2) return reg_fx.contiguous(), reg_fy.contiguous() def reg_pred(self, x, offset_fcs, cls_fcs): """Predict bucketing estimation (cls_pred) and fine regression (offset pred) with side-aware features.""" x_offset = x.view(-1, self.reg_in_channels) x_cls = x.view(-1, self.reg_in_channels) for fc in offset_fcs: x_offset = self.relu(fc(x_offset)) for fc in cls_fcs: x_cls = self.relu(fc(x_cls)) offset_pred = self.fc_reg_offset(x_offset) cls_pred = self.fc_reg_cls(x_cls) offset_pred = offset_pred.view(x.size(0), -1) cls_pred = cls_pred.view(x.size(0), -1) return offset_pred, cls_pred def side_aware_split(self, feat): """Split side-aware features aligned with orders of bucketing targets.""" l_end = int(np.ceil(self.up_reg_feat_size / 2)) r_start = int(np.floor(self.up_reg_feat_size / 2)) feat_fl = feat[:, :l_end] feat_fr = feat[:, r_start:].flip(dims=(1, )) feat_fl = feat_fl.contiguous() feat_fr = feat_fr.contiguous() feat = torch.cat([feat_fl, feat_fr], dim=-1) return feat def bbox_pred_split(self, bbox_pred, num_proposals_per_img): """Split batch bbox prediction back to each image.""" bucket_cls_preds, bucket_offset_preds = bbox_pred bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0) bucket_offset_preds = bucket_offset_preds.split( num_proposals_per_img, 0) bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds)) return bbox_pred def reg_forward(self, reg_x): outs = self.side_aware_feature_extractor(reg_x) edge_offset_preds = [] edge_cls_preds = [] reg_fx = outs[0] reg_fy = outs[1] offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs, self.reg_cls_fcs) offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs, self.reg_cls_fcs) offset_pred_x = self.side_aware_split(offset_pred_x) offset_pred_y = self.side_aware_split(offset_pred_y) cls_pred_x = self.side_aware_split(cls_pred_x) cls_pred_y = self.side_aware_split(cls_pred_y) edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1) edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1) return (edge_cls_preds, edge_offset_preds) def forward(self, x): bbox_pred = self.reg_forward(x) cls_score = self.cls_forward(x) return cls_score, bbox_pred def get_targets(self, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg): pos_proposals = [res.pos_bboxes for res in sampling_results] neg_proposals = [res.neg_bboxes for res in sampling_results] pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results] pos_gt_labels = [res.pos_gt_labels for res in sampling_results] cls_reg_targets = self.bucket_target(pos_proposals, neg_proposals, pos_gt_bboxes, pos_gt_labels, rcnn_train_cfg) (labels, label_weights, bucket_cls_targets, bucket_cls_weights, bucket_offset_targets, bucket_offset_weights) = cls_reg_targets return (labels, label_weights, (bucket_cls_targets, bucket_offset_targets), (bucket_cls_weights, bucket_offset_weights)) def bucket_target(self, pos_proposals_list, neg_proposals_list, pos_gt_bboxes_list, pos_gt_labels_list, rcnn_train_cfg, concat=True): (labels, label_weights, bucket_cls_targets, bucket_cls_weights, bucket_offset_targets, bucket_offset_weights) = multi_apply( self._bucket_target_single, pos_proposals_list, neg_proposals_list, pos_gt_bboxes_list, pos_gt_labels_list, cfg=rcnn_train_cfg) if concat: labels = torch.cat(labels, 0) 
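# Editor's note (illustrative sketch, not upstream code): with the default
# BucketingBBoxCoder(num_buckets=14), side_num = ceil(14 / 2) = 7, so every
# sampled proposal carries 4 * 7 = 28 bucket-classification and 28
# bucket-offset targets (7 buckets per side: left, right, top, bottom).
# With `concat=True`, the torch.cat calls here collapse the per-image lists,
# so a batch with n_i samples per image yields `labels` of shape (sum(n_i),)
# and bucket targets/weights of shape (sum(n_i), 28).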
label_weights = torch.cat(label_weights, 0) bucket_cls_targets = torch.cat(bucket_cls_targets, 0) bucket_cls_weights = torch.cat(bucket_cls_weights, 0) bucket_offset_targets = torch.cat(bucket_offset_targets, 0) bucket_offset_weights = torch.cat(bucket_offset_weights, 0) return (labels, label_weights, bucket_cls_targets, bucket_cls_weights, bucket_offset_targets, bucket_offset_weights) def _bucket_target_single(self, pos_proposals, neg_proposals, pos_gt_bboxes, pos_gt_labels, cfg): """Compute bucketing estimation targets and fine regression targets for a single image. Args: pos_proposals (Tensor): positive proposals of a single image, Shape (n_pos, 4). neg_proposals (Tensor): negative proposals of a single image, Shape (n_neg, 4). pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals of a single image, Shape (n_pos, 4). pos_gt_labels (Tensor): gt labels assigned to positive proposals of a single image, Shape (n_pos, ). cfg (dict): Config of calculating targets. Returns: tuple: - labels (Tensor): Labels in a single image. \ Shape (n,). - label_weights (Tensor): Label weights in a single image.\ Shape (n,). - bucket_cls_targets (Tensor): Bucket cls targets in \ a single image. Shape (n, num_buckets*2). - bucket_cls_weights (Tensor): Bucket cls weights in \ a single image. Shape (n, num_buckets*2). - bucket_offset_targets (Tensor): Bucket offset targets \ in a single image. Shape (n, num_buckets*2). - bucket_offset_weights (Tensor): Bucket offset weights \ in a single image. Shape (n, num_buckets*2). """ num_pos = pos_proposals.size(0) num_neg = neg_proposals.size(0) num_samples = num_pos + num_neg labels = pos_gt_bboxes.new_full((num_samples, ), self.num_classes, dtype=torch.long) label_weights = pos_proposals.new_zeros(num_samples) bucket_cls_targets = pos_proposals.new_zeros(num_samples, 4 * self.side_num) bucket_cls_weights = pos_proposals.new_zeros(num_samples, 4 * self.side_num) bucket_offset_targets = pos_proposals.new_zeros( num_samples, 4 * self.side_num) bucket_offset_weights = pos_proposals.new_zeros( num_samples, 4 * self.side_num) if num_pos > 0: labels[:num_pos] = pos_gt_labels label_weights[:num_pos] = 1.0 (pos_bucket_offset_targets, pos_bucket_offset_weights, pos_bucket_cls_targets, pos_bucket_cls_weights) = self.bbox_coder.encode( pos_proposals, pos_gt_bboxes) bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights if num_neg > 0: label_weights[-num_neg:] = 1.0 return (labels, label_weights, bucket_cls_targets, bucket_cls_weights, bucket_offset_targets, bucket_offset_weights) def loss(self, cls_score, bbox_pred, rois, labels, label_weights, bbox_targets, bbox_weights, reduction_override=None): losses = dict() if cls_score is not None: avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
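# Editor's note (sketch, not upstream code): avg_factor is the count of
# samples whose label weight is positive, clamped to at least 1 so the
# classification loss below never divides by zero, e.g.
# label_weights = torch.tensor([1., 1., 0.]) -> avg_factor == 2.0.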
losses['loss_cls'] = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor, reduction_override=reduction_override) losses['acc'] = accuracy(cls_score, labels) if bbox_pred is not None: bucket_cls_preds, bucket_offset_preds = bbox_pred bucket_cls_targets, bucket_offset_targets = bbox_targets bucket_cls_weights, bucket_offset_weights = bbox_weights # edge cls bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num) bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num) bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num) losses['loss_bbox_cls'] = self.loss_bbox_cls( bucket_cls_preds, bucket_cls_targets, bucket_cls_weights, avg_factor=bucket_cls_targets.size(0), reduction_override=reduction_override) losses['loss_bbox_reg'] = self.loss_bbox_reg( bucket_offset_preds, bucket_offset_targets, bucket_offset_weights, avg_factor=bucket_offset_targets.size(0), reduction_override=reduction_override) return losses @force_fp32(apply_to=('cls_score', 'bbox_pred')) def get_bboxes(self, rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=False, cfg=None): if isinstance(cls_score, list): cls_score = sum(cls_score) / float(len(cls_score)) scores = F.softmax(cls_score, dim=1) if cls_score is not None else None if bbox_pred is not None: bboxes, confidences = self.bbox_coder.decode( rois[:, 1:], bbox_pred, img_shape) else: bboxes = rois[:, 1:].clone() confidences = None if img_shape is not None: bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1) bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1) if rescale and bboxes.size(0) > 0: if isinstance(scale_factor, float): bboxes /= scale_factor else: bboxes /= torch.from_numpy(scale_factor).to(bboxes.device) if cfg is None: return bboxes, scores else: det_bboxes, det_labels = multiclass_nms( bboxes, scores, cfg.score_thr, cfg.nms, cfg.max_per_img, score_factors=confidences) return det_bboxes, det_labels @force_fp32(apply_to=('bbox_preds', )) def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas): """Refine bboxes during training. Args: rois (Tensor): Shape (n*bs, 5), where n is image number per GPU, and bs is the sampled RoIs per image. labels (Tensor): Shape (n*bs, ). bbox_preds (list[Tensor]): Shape [(n*bs, num_buckets*2), \ (n*bs, num_buckets*2)]. pos_is_gts (list[Tensor]): Flags indicating if each positive bbox is a gt bbox. img_metas (list[dict]): Meta info of each image. Returns: list[Tensor]: Refined bboxes of each image in a mini-batch. """ img_ids = rois[:, 0].long().unique(sorted=True) assert img_ids.numel() == len(img_metas) bboxes_list = [] for i in range(len(img_metas)): inds = torch.nonzero( rois[:, 0] == i, as_tuple=False).squeeze(dim=1) num_rois = inds.numel() bboxes_ = rois[inds, 1:] label_ = labels[inds] edge_cls_preds, edge_offset_preds = bbox_preds edge_cls_preds_ = edge_cls_preds[inds] edge_offset_preds_ = edge_offset_preds[inds] bbox_pred_ = [edge_cls_preds_, edge_offset_preds_] img_meta_ = img_metas[i] pos_is_gts_ = pos_is_gts[i] bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_, img_meta_) # filter gt bboxes pos_keep = 1 - pos_is_gts_ keep_inds = pos_is_gts_.new_ones(num_rois) keep_inds[:len(pos_is_gts_)] = pos_keep bboxes_list.append(bboxes[keep_inds.type(torch.bool)]) return bboxes_list @force_fp32(apply_to=('bbox_pred', )) def regress_by_class(self, rois, label, bbox_pred, img_meta): """Regress the bbox for the predicted class. Used in Cascade R-CNN. 
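Editor's note (summary of the code below, not upstream text): each box is re-decoded by the BucketingBBoxCoder from its predicted bucket logits and offsets; inputs of shape (n, 5) keep their leading batch-index column.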
Args: rois (Tensor): shape (n, 4) or (n, 5) label (Tensor): shape (n, ) bbox_pred (list[Tensor]): shape [(n, num_buckets *2), \ (n, num_buckets *2)] img_meta (dict): Image meta info. Returns: Tensor: Regressed bboxes, the same shape as input rois. """ assert rois.size(1) == 4 or rois.size(1) == 5 if rois.size(1) == 4: new_rois, _ = self.bbox_coder.decode(rois, bbox_pred, img_meta['img_shape']) else: bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred, img_meta['img_shape']) new_rois = torch.cat((rois[:, [0]], bboxes), dim=1) return new_rois ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from mmdet.models.builder import HEADS from .convfc_bbox_head import ConvFCBBoxHead @HEADS.register_module() class SCNetBBoxHead(ConvFCBBoxHead): """BBox head for `SCNet `_. This inherits ``ConvFCBBoxHead`` with modified forward() function, allow us to get intermediate shared feature. """ def _forward_shared(self, x): """Forward function for shared part.""" if self.num_shared_convs > 0: for conv in self.shared_convs: x = conv(x) if self.num_shared_fcs > 0: if self.with_avg_pool: x = self.avg_pool(x) x = x.flatten(1) for fc in self.shared_fcs: x = self.relu(fc(x)) return x def _forward_cls_reg(self, x): """Forward function for classification and regression parts.""" x_cls = x x_reg = x for conv in self.cls_convs: x_cls = conv(x_cls) if x_cls.dim() > 2: if self.with_avg_pool: x_cls = self.avg_pool(x_cls) x_cls = x_cls.flatten(1) for fc in self.cls_fcs: x_cls = self.relu(fc(x_cls)) for conv in self.reg_convs: x_reg = conv(x_reg) if x_reg.dim() > 2: if self.with_avg_pool: x_reg = self.avg_pool(x_reg) x_reg = x_reg.flatten(1) for fc in self.reg_fcs: x_reg = self.relu(fc(x_reg)) cls_score = self.fc_cls(x_cls) if self.with_cls else None bbox_pred = self.fc_reg(x_reg) if self.with_reg else None return cls_score, bbox_pred def forward(self, x, return_shared_feat=False): """Forward function. Args: x (Tensor): input features return_shared_feat (bool): If True, return cls-reg-shared feature. Return: out (tuple[Tensor]): contain ``cls_score`` and ``bbox_pred``, if ``return_shared_feat`` is True, append ``x_shared`` to the returned tuple. """ x_shared = self._forward_shared(x) out = self._forward_cls_reg(x_shared) if return_shared_feat: out += (x_shared, ) return out ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/cascade_roi_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch import torch.nn as nn from mmcv.runner import ModuleList from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, build_assigner, build_sampler, merge_aug_bboxes, merge_aug_masks, multiclass_nms) from ..builder import HEADS, build_head, build_roi_extractor from .base_roi_head import BaseRoIHead from .test_mixins import BBoxTestMixin, MaskTestMixin @HEADS.register_module() class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin): """Cascade roi head including one bbox head and one mask head. 
https://arxiv.org/abs/1712.00726 """ def __init__(self, num_stages, stage_loss_weights, bbox_roi_extractor=None, bbox_head=None, mask_roi_extractor=None, mask_head=None, shared_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): assert bbox_roi_extractor is not None assert bbox_head is not None assert shared_head is None, \ 'Shared head is not supported in Cascade RCNN anymore' self.num_stages = num_stages self.stage_loss_weights = stage_loss_weights super(CascadeRoIHead, self).__init__( bbox_roi_extractor=bbox_roi_extractor, bbox_head=bbox_head, mask_roi_extractor=mask_roi_extractor, mask_head=mask_head, shared_head=shared_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg) def init_bbox_head(self, bbox_roi_extractor, bbox_head): """Initialize box head and box roi extractor. Args: bbox_roi_extractor (dict): Config of box roi extractor. bbox_head (dict): Config of box in box head. """ self.bbox_roi_extractor = ModuleList() self.bbox_head = ModuleList() if not isinstance(bbox_roi_extractor, list): bbox_roi_extractor = [ bbox_roi_extractor for _ in range(self.num_stages) ] if not isinstance(bbox_head, list): bbox_head = [bbox_head for _ in range(self.num_stages)] assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages for roi_extractor, head in zip(bbox_roi_extractor, bbox_head): self.bbox_roi_extractor.append(build_roi_extractor(roi_extractor)) self.bbox_head.append(build_head(head)) def init_mask_head(self, mask_roi_extractor, mask_head): """Initialize mask head and mask roi extractor. Args: mask_roi_extractor (dict): Config of mask roi extractor. mask_head (dict): Config of mask in mask head. """ self.mask_head = nn.ModuleList() if not isinstance(mask_head, list): mask_head = [mask_head for _ in range(self.num_stages)] assert len(mask_head) == self.num_stages for head in mask_head: self.mask_head.append(build_head(head)) if mask_roi_extractor is not None: self.share_roi_extractor = False self.mask_roi_extractor = ModuleList() if not isinstance(mask_roi_extractor, list): mask_roi_extractor = [ mask_roi_extractor for _ in range(self.num_stages) ] assert len(mask_roi_extractor) == self.num_stages for roi_extractor in mask_roi_extractor: self.mask_roi_extractor.append( build_roi_extractor(roi_extractor)) else: self.share_roi_extractor = True self.mask_roi_extractor = self.bbox_roi_extractor def init_assigner_sampler(self): """Initialize assigner and sampler for each stage.""" self.bbox_assigner = [] self.bbox_sampler = [] if self.train_cfg is not None: for idx, rcnn_train_cfg in enumerate(self.train_cfg): self.bbox_assigner.append( build_assigner(rcnn_train_cfg.assigner)) self.current_stage = idx self.bbox_sampler.append( build_sampler(rcnn_train_cfg.sampler, context=self)) def forward_dummy(self, x, proposals): """Dummy forward function.""" # bbox head outs = () rois = bbox2roi([proposals]) if self.with_bbox: for i in range(self.num_stages): bbox_results = self._bbox_forward(i, x, rois) outs = outs + (bbox_results['cls_score'], bbox_results['bbox_pred']) # mask heads if self.with_mask: mask_rois = rois[:100] for i in range(self.num_stages): mask_results = self._mask_forward(i, x, mask_rois) outs = outs + (mask_results['mask_pred'], ) return outs def _bbox_forward(self, stage, x, rois): """Box head forward function used in both training and testing.""" bbox_roi_extractor = self.bbox_roi_extractor[stage] bbox_head = self.bbox_head[stage] bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], rois) # do not 
support caffe_c4 model anymore cls_score, bbox_pred = bbox_head(bbox_feats) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) return bbox_results def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg): """Run forward function and calculate loss for box head in training.""" rois = bbox2roi([res.bboxes for res in sampling_results]) bbox_results = self._bbox_forward(stage, x, rois) bbox_targets = self.bbox_head[stage].get_targets( sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg) loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'], bbox_results['bbox_pred'], rois, *bbox_targets) bbox_results.update( loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets) return bbox_results def _mask_forward(self, stage, x, rois): """Mask head forward function used in both training and testing.""" mask_roi_extractor = self.mask_roi_extractor[stage] mask_head = self.mask_head[stage] mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], rois) # do not support caffe_c4 model anymore mask_pred = mask_head(mask_feats) mask_results = dict(mask_pred=mask_pred) return mask_results def _mask_forward_train(self, stage, x, sampling_results, gt_masks, rcnn_train_cfg, bbox_feats=None): """Run forward function and calculate loss for mask head in training.""" pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) mask_results = self._mask_forward(stage, x, pos_rois) mask_targets = self.mask_head[stage].get_targets( sampling_results, gt_masks, rcnn_train_cfg) pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'], mask_targets, pos_labels) mask_results.update(loss_mask=loss_mask) return mask_results def forward_train(self, x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None): """ Args: x (list[Tensor]): list of multi-level img features. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. proposals (list[Tensors]): list of region proposals. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. 
Returns: dict[str, Tensor]: a dictionary of loss components """ losses = dict() for i in range(self.num_stages): self.current_stage = i rcnn_train_cfg = self.train_cfg[i] lw = self.stage_loss_weights[i] # assign gts and sample proposals sampling_results = [] if self.with_bbox or self.with_mask: bbox_assigner = self.bbox_assigner[i] bbox_sampler = self.bbox_sampler[i] num_imgs = len(img_metas) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] for j in range(num_imgs): assign_result = bbox_assigner.assign( proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], gt_labels[j]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[j], gt_bboxes[j], gt_labels[j], feats=[lvl_feat[j][None] for lvl_feat in x]) sampling_results.append(sampling_result) # bbox head forward and loss bbox_results = self._bbox_forward_train(i, x, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg) for name, value in bbox_results['loss_bbox'].items(): losses[f's{i}.{name}'] = ( value * lw if 'loss' in name else value) # mask head forward and loss if self.with_mask: mask_results = self._mask_forward_train( i, x, sampling_results, gt_masks, rcnn_train_cfg, bbox_results['bbox_feats']) for name, value in mask_results['loss_mask'].items(): losses[f's{i}.{name}'] = ( value * lw if 'loss' in name else value) # refine bboxes if i < self.num_stages - 1: pos_is_gts = [res.pos_is_gt for res in sampling_results] # bbox_targets is a tuple roi_labels = bbox_results['bbox_targets'][0] with torch.no_grad(): cls_score = bbox_results['cls_score'] if self.bbox_head[i].custom_activation: cls_score = self.bbox_head[i].loss_cls.get_activation( cls_score) # Empty proposal. if cls_score.numel() == 0: break roi_labels = torch.where( roi_labels == self.bbox_head[i].num_classes, cls_score[:, :-1].argmax(1), roi_labels) proposal_list = self.bbox_head[i].refine_bboxes( bbox_results['rois'], roi_labels, bbox_results['bbox_pred'], pos_is_gts, img_metas) return losses def simple_test(self, x, proposal_list, img_metas, rescale=False): """Test without augmentation. Args: x (tuple[Tensor]): Features from upstream network. Each has shape (batch_size, c, h, w). proposal_list (list(Tensor)): Proposals from rpn head. Each has shape (num_proposals, 5), last dimension 5 represent (x1, y1, x2, y2, score). img_metas (list[dict]): Meta information of images. rescale (bool): Whether to rescale the results to the original image. Default: True. Returns: list[list[np.ndarray]] or list[tuple]: When no mask branch, it is bbox results of each image and classes with type `list[list[np.ndarray]]`. The outer list corresponds to each image. The inner list corresponds to each class. When the model has mask branch, it contains bbox results and mask results. The outer list corresponds to each image, and first element of tuple is bbox results, second element is mask results. """ assert self.with_bbox, 'Bbox head must be implemented.' 
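# Editor's note (illustrative sketch, not upstream code): the loop below runs
# every stage's bbox head on progressively refined rois and averages the raw
# class scores over stages before a single NMS pass by the last head, i.e.
# for three stages: cls_score = (s0 + s1 + s2) / 3.0 per image.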
num_imgs = len(proposal_list) img_shapes = tuple(meta['img_shape'] for meta in img_metas) ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) # "ms" in variable names means multi-stage ms_bbox_result = {} ms_segm_result = {} ms_scores = [] rcnn_test_cfg = self.test_cfg rois = bbox2roi(proposal_list) if rois.shape[0] == 0: # There is no proposal in the whole batch bbox_results = [[ np.zeros((0, 5), dtype=np.float32) for _ in range(self.bbox_head[-1].num_classes) ]] * num_imgs if self.with_mask: mask_classes = self.mask_head[-1].num_classes segm_results = [[[] for _ in range(mask_classes)] for _ in range(num_imgs)] results = list(zip(bbox_results, segm_results)) else: results = bbox_results return results for i in range(self.num_stages): bbox_results = self._bbox_forward(i, x, rois) # split batch bbox prediction back to each image cls_score = bbox_results['cls_score'] bbox_pred = bbox_results['bbox_pred'] num_proposals_per_img = tuple( len(proposals) for proposals in proposal_list) rois = rois.split(num_proposals_per_img, 0) cls_score = cls_score.split(num_proposals_per_img, 0) if isinstance(bbox_pred, torch.Tensor): bbox_pred = bbox_pred.split(num_proposals_per_img, 0) else: bbox_pred = self.bbox_head[i].bbox_pred_split( bbox_pred, num_proposals_per_img) ms_scores.append(cls_score) if i < self.num_stages - 1: if self.bbox_head[i].custom_activation: cls_score = [ self.bbox_head[i].loss_cls.get_activation(s) for s in cls_score ] refine_rois_list = [] for j in range(num_imgs): if rois[j].shape[0] > 0: bbox_label = cls_score[j][:, :-1].argmax(dim=1) refined_rois = self.bbox_head[i].regress_by_class( rois[j], bbox_label, bbox_pred[j], img_metas[j]) refine_rois_list.append(refined_rois) rois = torch.cat(refine_rois_list) # average scores of each image by stages cls_score = [ sum([score[i] for score in ms_scores]) / float(len(ms_scores)) for i in range(num_imgs) ] # apply bbox post-processing to each image individually det_bboxes = [] det_labels = [] for i in range(num_imgs): det_bbox, det_label = self.bbox_head[-1].get_bboxes( rois[i], cls_score[i], bbox_pred[i], img_shapes[i], scale_factors[i], rescale=rescale, cfg=rcnn_test_cfg) det_bboxes.append(det_bbox) det_labels.append(det_label) bbox_results = [ bbox2result(det_bboxes[i], det_labels[i], self.bbox_head[-1].num_classes) for i in range(num_imgs) ] ms_bbox_result['ensemble'] = bbox_results if self.with_mask: if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): mask_classes = self.mask_head[-1].num_classes segm_results = [[[] for _ in range(mask_classes)] for _ in range(num_imgs)] else: if rescale and not isinstance(scale_factors[0], float): scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i][:, :4] for i in range(len(det_bboxes)) ] mask_rois = bbox2roi(_bboxes) num_mask_rois_per_img = tuple( _bbox.size(0) for _bbox in _bboxes) aug_masks = [] for i in range(self.num_stages): mask_results = self._mask_forward(i, x, mask_rois) mask_pred = mask_results['mask_pred'] # split batch mask prediction back to each image mask_pred = mask_pred.split(num_mask_rois_per_img, 0) aug_masks.append([ m.sigmoid().cpu().detach().numpy() for m in mask_pred ]) # apply mask post-processing to each image individually segm_results = [] for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: segm_results.append( [[] for _ in 
range(self.mask_head[-1].num_classes)]) else: aug_mask = [mask[i] for mask in aug_masks] merged_masks = merge_aug_masks( aug_mask, [[img_metas[i]]] * self.num_stages, rcnn_test_cfg) segm_result = self.mask_head[-1].get_seg_masks( merged_masks, _bboxes[i], det_labels[i], rcnn_test_cfg, ori_shapes[i], scale_factors[i], rescale) segm_results.append(segm_result) ms_segm_result['ensemble'] = segm_results if self.with_mask: results = list( zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble'])) else: results = ms_bbox_result['ensemble'] return results def aug_test(self, features, proposal_list, img_metas, rescale=False): """Test with augmentations. If rescale is False, then returned bboxes and masks will fit the scale of imgs[0]. """ rcnn_test_cfg = self.test_cfg aug_bboxes = [] aug_scores = [] for x, img_meta in zip(features, img_metas): # only one image in the batch img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = img_meta[0]['flip_direction'] proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, scale_factor, flip, flip_direction) # "ms" in variable names means multi-stage ms_scores = [] rois = bbox2roi([proposals]) if rois.shape[0] == 0: # There is no proposal in the single image aug_bboxes.append(rois.new_zeros(0, 4)) aug_scores.append(rois.new_zeros(0, 1)) continue for i in range(self.num_stages): bbox_results = self._bbox_forward(i, x, rois) ms_scores.append(bbox_results['cls_score']) if i < self.num_stages - 1: cls_score = bbox_results['cls_score'] if self.bbox_head[i].custom_activation: cls_score = self.bbox_head[i].loss_cls.get_activation( cls_score) bbox_label = cls_score[:, :-1].argmax(dim=1) rois = self.bbox_head[i].regress_by_class( rois, bbox_label, bbox_results['bbox_pred'], img_meta[0]) cls_score = sum(ms_scores) / float(len(ms_scores)) bboxes, scores = self.bbox_head[-1].get_bboxes( rois, cls_score, bbox_results['bbox_pred'], img_shape, scale_factor, rescale=False, cfg=None) aug_bboxes.append(bboxes) aug_scores.append(scores) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = merge_aug_bboxes( aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img) bbox_result = bbox2result(det_bboxes, det_labels, self.bbox_head[-1].num_classes) if self.with_mask: if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head[-1].num_classes)] else: aug_masks = [] aug_img_metas = [] for x, img_meta in zip(features, img_metas): img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = img_meta[0]['flip_direction'] _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, scale_factor, flip, flip_direction) mask_rois = bbox2roi([_bboxes]) for i in range(self.num_stages): mask_results = self._mask_forward(i, x, mask_rois) aug_masks.append( mask_results['mask_pred'].sigmoid().cpu().numpy()) aug_img_metas.append(img_meta) merged_masks = merge_aug_masks(aug_masks, aug_img_metas, self.test_cfg) ori_shape = img_metas[0][0]['ori_shape'] dummy_scale_factor = np.ones(4) segm_result = self.mask_head[-1].get_seg_masks( merged_masks, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor=dummy_scale_factor, rescale=False) return [(bbox_result, segm_result)] else: return [bbox_result] def onnx_export(self, x, proposals, img_metas): assert self.with_bbox, 'Bbox 
head must be implemented.' assert proposals.shape[0] == 1, 'Only support one input image ' \ 'while in exporting to ONNX' # remove the scores rois = proposals[..., :-1] batch_size = rois.shape[0] num_proposals_per_img = rois.shape[1] # Eliminate the batch dimension rois = rois.view(-1, 4) # add dummy batch index rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois], dim=-1) max_shape = img_metas[0]['img_shape_for_onnx'] ms_scores = [] rcnn_test_cfg = self.test_cfg for i in range(self.num_stages): bbox_results = self._bbox_forward(i, x, rois) cls_score = bbox_results['cls_score'] bbox_pred = bbox_results['bbox_pred'] # Recover the batch dimension rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1)) cls_score = cls_score.reshape(batch_size, num_proposals_per_img, cls_score.size(-1)) bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, 4) ms_scores.append(cls_score) if i < self.num_stages - 1: assert self.bbox_head[i].reg_class_agnostic new_rois = self.bbox_head[i].bbox_coder.decode( rois[..., 1:], bbox_pred, max_shape=max_shape) rois = new_rois.reshape(-1, new_rois.shape[-1]) # add dummy batch index rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois], dim=-1) cls_score = sum(ms_scores) / float(len(ms_scores)) bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, 4) rois = rois.reshape(batch_size, num_proposals_per_img, -1) det_bboxes, det_labels = self.bbox_head[-1].onnx_export( rois, cls_score, bbox_pred, max_shape, cfg=rcnn_test_cfg) if not self.with_mask: return det_bboxes, det_labels else: batch_index = torch.arange( det_bboxes.size(0), device=det_bboxes.device).float().view(-1, 1, 1).expand( det_bboxes.size(0), det_bboxes.size(1), 1) rois = det_bboxes[..., :4] mask_rois = torch.cat([batch_index, rois], dim=-1) mask_rois = mask_rois.view(-1, 5) aug_masks = [] for i in range(self.num_stages): mask_results = self._mask_forward(i, x, mask_rois) mask_pred = mask_results['mask_pred'] aug_masks.append(mask_pred) max_shape = img_metas[0]['img_shape_for_onnx'] # calculate the mean of masks from several stage mask_pred = sum(aug_masks) / len(aug_masks) segm_results = self.mask_head[-1].onnx_export( mask_pred, rois.reshape(-1, 4), det_labels.reshape(-1), self.test_cfg, max_shape) segm_results = segm_results.reshape(batch_size, det_bboxes.shape[1], max_shape[0], max_shape[1]) return det_bboxes, det_labels, segm_results ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/double_roi_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from ..builder import HEADS from .standard_roi_head import StandardRoIHead @HEADS.register_module() class DoubleHeadRoIHead(StandardRoIHead): """RoI head for Double Head RCNN. 
https://arxiv.org/abs/1904.06493 """ def __init__(self, reg_roi_scale_factor, **kwargs): super(DoubleHeadRoIHead, self).__init__(**kwargs) self.reg_roi_scale_factor = reg_roi_scale_factor def _bbox_forward(self, x, rois): """Box head forward function used in both training and testing time.""" bbox_cls_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) bbox_reg_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois, roi_scale_factor=self.reg_roi_scale_factor) if self.with_shared_head: bbox_cls_feats = self.shared_head(bbox_cls_feats) bbox_reg_feats = self.shared_head(bbox_reg_feats) cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_cls_feats) return bbox_results ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/dynamic_roi_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from mmdet.core import bbox2roi from mmdet.models.losses import SmoothL1Loss from ..builder import HEADS from .standard_roi_head import StandardRoIHead EPS = 1e-15 @HEADS.register_module() class DynamicRoIHead(StandardRoIHead): """RoI head for `Dynamic R-CNN `_.""" def __init__(self, **kwargs): super(DynamicRoIHead, self).__init__(**kwargs) assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss) # the IoU history of the past `update_iter_interval` iterations self.iou_history = [] # the beta history of the past `update_iter_interval` iterations self.beta_history = [] def forward_train(self, x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None): """Forward function for training. Args: x (list[Tensor]): list of multi-level img features. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. proposals (list[Tensors]): list of region proposals. gt_bboxes (list[Tensor]): each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. 
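Editor's note (summary, not upstream text): every `update_iter_interval` iterations this head calls `update_hyperparameters()`, which raises the assigner's IoU thresholds to max(initial_iou, mean(iou_history)) and shrinks the SmoothL1 beta to min(initial_beta, median(beta_history)).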
Returns: dict[str, Tensor]: a dictionary of loss components """ # assign gts and sample proposals if self.with_bbox or self.with_mask: num_imgs = len(img_metas) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] cur_iou = [] for i in range(num_imgs): assign_result = self.bbox_assigner.assign( proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = self.bbox_sampler.sample( assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=[lvl_feat[i][None] for lvl_feat in x]) # record the `iou_topk`-th largest IoU in an image iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk, len(assign_result.max_overlaps)) ious, _ = torch.topk(assign_result.max_overlaps, iou_topk) cur_iou.append(ious[-1].item()) sampling_results.append(sampling_result) # average the current IoUs over images cur_iou = np.mean(cur_iou) self.iou_history.append(cur_iou) losses = dict() # bbox head forward and loss if self.with_bbox: bbox_results = self._bbox_forward_train(x, sampling_results, gt_bboxes, gt_labels, img_metas) losses.update(bbox_results['loss_bbox']) # mask head forward and loss if self.with_mask: mask_results = self._mask_forward_train(x, sampling_results, bbox_results['bbox_feats'], gt_masks, img_metas) losses.update(mask_results['loss_mask']) # update IoU threshold and SmoothL1 beta update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval if len(self.iou_history) % update_iter_interval == 0: new_iou_thr, new_beta = self.update_hyperparameters() return losses def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, img_metas): num_imgs = len(img_metas) rois = bbox2roi([res.bboxes for res in sampling_results]) bbox_results = self._bbox_forward(x, rois) bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes, gt_labels, self.train_cfg) # record the `beta_topk`-th smallest target # `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets # and bbox_weights, respectively pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1) num_pos = len(pos_inds) cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1) beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs, num_pos) cur_target = torch.kthvalue(cur_target, beta_topk)[0].item() self.beta_history.append(cur_target) loss_bbox = self.bbox_head.loss(bbox_results['cls_score'], bbox_results['bbox_pred'], rois, *bbox_targets) bbox_results.update(loss_bbox=loss_bbox) return bbox_results def update_hyperparameters(self): """Update hyperparameters like IoU thresholds for assigner and beta for SmoothL1 loss based on the training statistics. Returns: tuple[float]: the updated ``iou_thr`` and ``beta``. """ new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou, np.mean(self.iou_history)) self.iou_history = [] self.bbox_assigner.pos_iou_thr = new_iou_thr self.bbox_assigner.neg_iou_thr = new_iou_thr self.bbox_assigner.min_pos_iou = new_iou_thr if (np.median(self.beta_history) < EPS): # avoid 0 or too small value for new_beta new_beta = self.bbox_head.loss_bbox.beta else: new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta, np.median(self.beta_history)) self.beta_history = [] self.bbox_head.loss_bbox.beta = new_beta return new_iou_thr, new_beta ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/grid_roi_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
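# Editor's illustrative sketch (not part of the upstream file): the box-jitter
# math used by GridRoIHead._random_jitter below, worked for one box with fixed
# offsets in place of the uniform(-amplitude, amplitude) samples:
#
#     >>> import torch
#     >>> bboxes = torch.tensor([[0., 0., 10., 10.]])
#     >>> offsets = torch.tensor([[0.1, -0.1, 0.2, 0.2]])
#     >>> cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2
#     >>> wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()
#     >>> new_cxcy = cxcy + wh * offsets[:, :2]
#     >>> new_wh = wh * (1 + offsets[:, 2:])
#     >>> torch.cat([new_cxcy - new_wh / 2, new_cxcy + new_wh / 2], dim=1)
#     tensor([[ 0., -2., 12., 10.]])
#
# The y1 = -2 is then clamped to the image bounds, exactly as in the method.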
import numpy as np import torch from mmdet.core import bbox2result, bbox2roi from ..builder import HEADS, build_head, build_roi_extractor from .standard_roi_head import StandardRoIHead @HEADS.register_module() class GridRoIHead(StandardRoIHead): """Grid roi head for Grid R-CNN. https://arxiv.org/abs/1811.12030 """ def __init__(self, grid_roi_extractor, grid_head, **kwargs): assert grid_head is not None super(GridRoIHead, self).__init__(**kwargs) if grid_roi_extractor is not None: self.grid_roi_extractor = build_roi_extractor(grid_roi_extractor) self.share_roi_extractor = False else: self.share_roi_extractor = True self.grid_roi_extractor = self.bbox_roi_extractor self.grid_head = build_head(grid_head) def _random_jitter(self, sampling_results, img_metas, amplitude=0.15): """Randomly jitter positive proposals for training.""" for sampling_result, img_meta in zip(sampling_results, img_metas): bboxes = sampling_result.pos_bboxes random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_( -amplitude, amplitude) # before jittering cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2 wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs() # after jittering new_cxcy = cxcy + wh * random_offsets[:, :2] new_wh = wh * (1 + random_offsets[:, 2:]) # xywh to xyxy new_x1y1 = (new_cxcy - new_wh / 2) new_x2y2 = (new_cxcy + new_wh / 2) new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1) # clip bboxes max_shape = img_meta['img_shape'] if max_shape is not None: new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1) new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1) sampling_result.pos_bboxes = new_bboxes return sampling_results def forward_dummy(self, x, proposals): """Dummy forward function.""" # bbox head outs = () rois = bbox2roi([proposals]) if self.with_bbox: bbox_results = self._bbox_forward(x, rois) outs = outs + (bbox_results['cls_score'], bbox_results['bbox_pred']) # grid head grid_rois = rois[:100] grid_feats = self.grid_roi_extractor( x[:self.grid_roi_extractor.num_inputs], grid_rois) if self.with_shared_head: grid_feats = self.shared_head(grid_feats) grid_pred = self.grid_head(grid_feats) outs = outs + (grid_pred, ) # mask head if self.with_mask: mask_rois = rois[:100] mask_results = self._mask_forward(x, mask_rois) outs = outs + (mask_results['mask_pred'], ) return outs def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, img_metas): """Run forward function and calculate loss for box head in training.""" bbox_results = super(GridRoIHead, self)._bbox_forward_train(x, sampling_results, gt_bboxes, gt_labels, img_metas) # Grid head forward and loss sampling_results = self._random_jitter(sampling_results, img_metas) pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) # GN in head does not support zero shape input if pos_rois.shape[0] == 0: return bbox_results grid_feats = self.grid_roi_extractor( x[:self.grid_roi_extractor.num_inputs], pos_rois) if self.with_shared_head: grid_feats = self.shared_head(grid_feats) # Accelerate training max_sample_num_grid = self.train_cfg.get('max_num_grid', 192) sample_idx = torch.randperm( grid_feats.shape[0])[:min(grid_feats.shape[0], max_sample_num_grid )] grid_feats = grid_feats[sample_idx] grid_pred = self.grid_head(grid_feats) grid_targets = self.grid_head.get_targets(sampling_results, self.train_cfg) grid_targets = grid_targets[sample_idx] loss_grid = self.grid_head.loss(grid_pred, grid_targets) bbox_results['loss_bbox'].update(loss_grid) return bbox_results def simple_test(self, x, proposal_list, img_metas, proposals=None, rescale=False):
"""Test without augmentation.""" assert self.with_bbox, 'Bbox head must be implemented.' det_bboxes, det_labels = self.simple_test_bboxes( x, img_metas, proposal_list, self.test_cfg, rescale=False) # pack rois into bboxes grid_rois = bbox2roi([det_bbox[:, :4] for det_bbox in det_bboxes]) if grid_rois.shape[0] != 0: grid_feats = self.grid_roi_extractor( x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois) self.grid_head.test_mode = True grid_pred = self.grid_head(grid_feats) # split batch grid head prediction back to each image num_roi_per_img = tuple(len(det_bbox) for det_bbox in det_bboxes) grid_pred = { k: v.split(num_roi_per_img, 0) for k, v in grid_pred.items() } # apply bbox post-processing to each image individually bbox_results = [] num_imgs = len(det_bboxes) for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: bbox_results.append([ np.zeros((0, 5), dtype=np.float32) for _ in range(self.bbox_head.num_classes) ]) else: det_bbox = self.grid_head.get_bboxes( det_bboxes[i], grid_pred['fused'][i], [img_metas[i]]) if rescale: det_bbox[:, :4] /= img_metas[i]['scale_factor'] bbox_results.append( bbox2result(det_bbox, det_labels[i], self.bbox_head.num_classes)) else: bbox_results = [[ np.zeros((0, 5), dtype=np.float32) for _ in range(self.bbox_head.num_classes) ] for _ in range(len(det_bboxes))] if not self.with_mask: return bbox_results else: segm_results = self.simple_test_mask( x, img_metas, det_bboxes, det_labels, rescale=rescale) return list(zip(bbox_results, segm_results)) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/htc_roi_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch import torch.nn.functional as F from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms) from ..builder import HEADS, build_head, build_roi_extractor from ..utils.brick_wrappers import adaptive_avg_pool2d from .cascade_roi_head import CascadeRoIHead @HEADS.register_module() class HybridTaskCascadeRoIHead(CascadeRoIHead): """Hybrid task cascade roi head including one bbox head and one mask head. 
================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/htc_roi_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn.functional as F

from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,
                        merge_aug_masks, multiclass_nms)
from ..builder import HEADS, build_head, build_roi_extractor
from ..utils.brick_wrappers import adaptive_avg_pool2d
from .cascade_roi_head import CascadeRoIHead


@HEADS.register_module()
class HybridTaskCascadeRoIHead(CascadeRoIHead):
    """Hybrid task cascade roi head including one bbox head and one mask head.

    https://arxiv.org/abs/1901.07518
    """

    def __init__(self,
                 num_stages,
                 stage_loss_weights,
                 semantic_roi_extractor=None,
                 semantic_head=None,
                 semantic_fusion=('bbox', 'mask'),
                 interleaved=True,
                 mask_info_flow=True,
                 **kwargs):
        super(HybridTaskCascadeRoIHead,
              self).__init__(num_stages, stage_loss_weights, **kwargs)
        assert self.with_bbox
        assert not self.with_shared_head  # shared head is not supported
        if semantic_head is not None:
            self.semantic_roi_extractor = build_roi_extractor(
                semantic_roi_extractor)
            self.semantic_head = build_head(semantic_head)

        self.semantic_fusion = semantic_fusion
        self.interleaved = interleaved
        self.mask_info_flow = mask_info_flow

    @property
    def with_semantic(self):
        """bool: whether the head has semantic head"""
        if hasattr(self, 'semantic_head') and self.semantic_head is not None:
            return True
        else:
            return False

    def forward_dummy(self, x, proposals):
        """Dummy forward function."""
        outs = ()
        # semantic head
        if self.with_semantic:
            _, semantic_feat = self.semantic_head(x)
        else:
            semantic_feat = None
        # bbox heads
        rois = bbox2roi([proposals])
        for i in range(self.num_stages):
            bbox_results = self._bbox_forward(
                i, x, rois, semantic_feat=semantic_feat)
            outs = outs + (bbox_results['cls_score'],
                           bbox_results['bbox_pred'])
        # mask heads
        if self.with_mask:
            mask_rois = rois[:100]
            mask_roi_extractor = self.mask_roi_extractor[-1]
            mask_feats = mask_roi_extractor(
                x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
            if self.with_semantic and 'mask' in self.semantic_fusion:
                mask_semantic_feat = self.semantic_roi_extractor(
                    [semantic_feat], mask_rois)
                mask_feats = mask_feats + mask_semantic_feat
            last_feat = None
            for i in range(self.num_stages):
                mask_head = self.mask_head[i]
                if self.mask_info_flow:
                    mask_pred, last_feat = mask_head(mask_feats, last_feat)
                else:
                    mask_pred = mask_head(mask_feats)
                outs = outs + (mask_pred, )
        return outs

    def _bbox_forward_train(self,
                            stage,
                            x,
                            sampling_results,
                            gt_bboxes,
                            gt_labels,
                            rcnn_train_cfg,
                            semantic_feat=None):
        """Run forward function and calculate loss for box head in training."""
        bbox_head = self.bbox_head[stage]
        rois = bbox2roi([res.bboxes for res in sampling_results])
        bbox_results = self._bbox_forward(
            stage, x, rois, semantic_feat=semantic_feat)

        bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes,
                                             gt_labels, rcnn_train_cfg)
        loss_bbox = bbox_head.loss(bbox_results['cls_score'],
                                   bbox_results['bbox_pred'], rois,
                                   *bbox_targets)

        bbox_results.update(
            loss_bbox=loss_bbox,
            rois=rois,
            bbox_targets=bbox_targets,
        )
        return bbox_results

    def _mask_forward_train(self,
                            stage,
                            x,
                            sampling_results,
                            gt_masks,
                            rcnn_train_cfg,
                            semantic_feat=None):
        """Run forward function and calculate loss for mask head in training."""
        mask_roi_extractor = self.mask_roi_extractor[stage]
        mask_head = self.mask_head[stage]
        pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
        mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
                                        pos_rois)

        # semantic feature fusion
        # element-wise sum for original features and pooled semantic features
        if self.with_semantic and 'mask' in self.semantic_fusion:
            mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
                                                             pos_rois)
            if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
                mask_semantic_feat = F.adaptive_avg_pool2d(
                    mask_semantic_feat, mask_feats.shape[-2:])
            mask_feats = mask_feats + mask_semantic_feat

        # mask information flow
        # forward all previous mask heads to obtain last_feat, and fuse it
        # with the normal mask feature
        if self.mask_info_flow:
            last_feat = None
            for i in range(stage):
                last_feat = self.mask_head[i](
                    mask_feats, last_feat, return_logits=False)
            mask_pred = mask_head(mask_feats, last_feat, return_feat=False)
        else:
            mask_pred = mask_head(mask_feats, return_feat=False)

        mask_targets = mask_head.get_targets(sampling_results, gt_masks,
                                             rcnn_train_cfg)
        pos_labels = torch.cat(
            [res.pos_gt_labels for res in sampling_results])
        loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels)

        mask_results = dict(loss_mask=loss_mask)
        return mask_results

    def _bbox_forward(self, stage, x, rois, semantic_feat=None):
        """Box head forward function used in both training and testing."""
        bbox_roi_extractor = self.bbox_roi_extractor[stage]
        bbox_head = self.bbox_head[stage]
        bbox_feats = bbox_roi_extractor(
            x[:len(bbox_roi_extractor.featmap_strides)], rois)
        if self.with_semantic and 'bbox' in self.semantic_fusion:
            bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
                                                             rois)
            if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
                bbox_semantic_feat = adaptive_avg_pool2d(
                    bbox_semantic_feat, bbox_feats.shape[-2:])
            bbox_feats = bbox_feats + bbox_semantic_feat
        cls_score, bbox_pred = bbox_head(bbox_feats)

        bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred)
        return bbox_results

    def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None):
        """Mask head forward function for testing."""
        mask_roi_extractor = self.mask_roi_extractor[stage]
        mask_head = self.mask_head[stage]
        mask_rois = bbox2roi([bboxes])
        mask_feats = mask_roi_extractor(
            x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
        if self.with_semantic and 'mask' in self.semantic_fusion:
            mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
                                                             mask_rois)
            if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
                mask_semantic_feat = F.adaptive_avg_pool2d(
                    mask_semantic_feat, mask_feats.shape[-2:])
            mask_feats = mask_feats + mask_semantic_feat
        if self.mask_info_flow:
            last_feat = None
            last_pred = None
            for i in range(stage):
                mask_pred, last_feat = self.mask_head[i](mask_feats, last_feat)
                if last_pred is not None:
                    mask_pred = mask_pred + last_pred
                last_pred = mask_pred
            mask_pred = mask_head(mask_feats, last_feat, return_feat=False)
            if last_pred is not None:
                mask_pred = mask_pred + last_pred
        else:
            mask_pred = mask_head(mask_feats)
        return mask_pred
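
# --- Illustration (not part of the original file) ---
# A minimal sketch of the mask-information-flow chaining used above, with
# stand-in conv modules instead of real mask heads: each earlier stage
# produces a residual feature that is fused into the next stage's input,
# mirroring the loop over self.mask_head[:stage]. All shapes are assumptions.
import torch
import torch.nn as nn

heads = nn.ModuleList([nn.Conv2d(8, 8, 3, padding=1) for _ in range(3)])
mask_feats = torch.randn(2, 8, 14, 14)
last_feat = None
for head in heads[:-1]:
    # plays the role of the return_logits=False calls above
    last_feat = head(mask_feats if last_feat is None
                     else mask_feats + last_feat)
final_pred = heads[-1](mask_feats + last_feat)
print(final_pred.shape)  # torch.Size([2, 8, 14, 14])
# --- end illustration ---
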
    def forward_train(self,
                      x,
                      img_metas,
                      proposal_list,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None,
                      gt_semantic_seg=None):
        """
        Args:
            x (list[Tensor]): list of multi-level img features.
            img_metas (list[dict]): list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For
                details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.
            proposal_list (list[Tensors]): list of region proposals.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            gt_bboxes_ignore (None, list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
            gt_masks (None, Tensor) : true segmentation masks for each box
                used if the architecture supports a segmentation task.
            gt_semantic_seg (None, list[Tensor]): semantic segmentation masks
                used if the architecture supports semantic segmentation task.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        # semantic segmentation part
        # 2 outputs: segmentation prediction and embedded features
        losses = dict()
        if self.with_semantic:
            semantic_pred, semantic_feat = self.semantic_head(x)
            loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg)
            losses['loss_semantic_seg'] = loss_seg
        else:
            semantic_feat = None

        for i in range(self.num_stages):
            self.current_stage = i
            rcnn_train_cfg = self.train_cfg[i]
            lw = self.stage_loss_weights[i]

            # assign gts and sample proposals
            sampling_results = []
            bbox_assigner = self.bbox_assigner[i]
            bbox_sampler = self.bbox_sampler[i]
            num_imgs = len(img_metas)
            if gt_bboxes_ignore is None:
                gt_bboxes_ignore = [None for _ in range(num_imgs)]

            for j in range(num_imgs):
                assign_result = bbox_assigner.assign(proposal_list[j],
                                                     gt_bboxes[j],
                                                     gt_bboxes_ignore[j],
                                                     gt_labels[j])
                sampling_result = bbox_sampler.sample(
                    assign_result,
                    proposal_list[j],
                    gt_bboxes[j],
                    gt_labels[j],
                    feats=[lvl_feat[j][None] for lvl_feat in x])
                sampling_results.append(sampling_result)

            # bbox head forward and loss
            bbox_results = \
                self._bbox_forward_train(
                    i, x, sampling_results, gt_bboxes, gt_labels,
                    rcnn_train_cfg, semantic_feat)
            roi_labels = bbox_results['bbox_targets'][0]

            for name, value in bbox_results['loss_bbox'].items():
                losses[f's{i}.{name}'] = (
                    value * lw if 'loss' in name else value)

            # mask head forward and loss
            if self.with_mask:
                # interleaved execution: use regressed bboxes by the box
                # branch to train the mask branch
                if self.interleaved:
                    pos_is_gts = [res.pos_is_gt for res in sampling_results]
                    with torch.no_grad():
                        proposal_list = self.bbox_head[i].refine_bboxes(
                            bbox_results['rois'], roi_labels,
                            bbox_results['bbox_pred'], pos_is_gts, img_metas)
                        # re-assign and sample 512 RoIs from 512 RoIs
                        sampling_results = []
                        for j in range(num_imgs):
                            assign_result = bbox_assigner.assign(
                                proposal_list[j], gt_bboxes[j],
                                gt_bboxes_ignore[j], gt_labels[j])
                            sampling_result = bbox_sampler.sample(
                                assign_result,
                                proposal_list[j],
                                gt_bboxes[j],
                                gt_labels[j],
                                feats=[lvl_feat[j][None] for lvl_feat in x])
                            sampling_results.append(sampling_result)
                mask_results = self._mask_forward_train(
                    i, x, sampling_results, gt_masks, rcnn_train_cfg,
                    semantic_feat)
                for name, value in mask_results['loss_mask'].items():
                    losses[f's{i}.{name}'] = (
                        value * lw if 'loss' in name else value)

            # refine bboxes (same as Cascade R-CNN)
            if i < self.num_stages - 1 and not self.interleaved:
                pos_is_gts = [res.pos_is_gt for res in sampling_results]
                with torch.no_grad():
                    proposal_list = self.bbox_head[i].refine_bboxes(
                        bbox_results['rois'], roi_labels,
                        bbox_results['bbox_pred'], pos_is_gts, img_metas)

        return losses

    def simple_test(self, x, proposal_list, img_metas, rescale=False):
        """Test without augmentation.

        Args:
            x (tuple[Tensor]): Features from upstream network. Each
                has shape (batch_size, c, h, w).
            proposal_list (list(Tensor)): Proposals from rpn head.
                Each has shape (num_proposals, 5), last dimension
                5 represent (x1, y1, x2, y2, score).
            img_metas (list[dict]): Meta information of images.
            rescale (bool): Whether to rescale the results to
                the original image. Default: False.

        Returns:
            list[list[np.ndarray]] or list[tuple]: When no mask branch,
            it is bbox results of each image and classes with type
            `list[list[np.ndarray]]`. The outer list
            corresponds to each image. The inner list
            corresponds to each class. When the model has mask branch,
            it contains bbox results and mask results.
            The outer list corresponds to each image, and first element
            of tuple is bbox results, second element is mask results.
        """
        if self.with_semantic:
            _, semantic_feat = self.semantic_head(x)
        else:
            semantic_feat = None

        num_imgs = len(proposal_list)
        img_shapes = tuple(meta['img_shape'] for meta in img_metas)
        ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
        scale_factors = tuple(meta['scale_factor'] for meta in img_metas)

        # "ms" in variable names means multi-stage
        ms_bbox_result = {}
        ms_segm_result = {}
        ms_scores = []
        rcnn_test_cfg = self.test_cfg

        rois = bbox2roi(proposal_list)

        if rois.shape[0] == 0:
            # There is no proposal in the whole batch
            bbox_results = [[
                np.zeros((0, 5), dtype=np.float32)
                for _ in range(self.bbox_head[-1].num_classes)
            ]] * num_imgs

            if self.with_mask:
                mask_classes = self.mask_head[-1].num_classes
                segm_results = [[[] for _ in range(mask_classes)]
                                for _ in range(num_imgs)]
                results = list(zip(bbox_results, segm_results))
            else:
                results = bbox_results
            return results

        for i in range(self.num_stages):
            bbox_head = self.bbox_head[i]
            bbox_results = self._bbox_forward(
                i, x, rois, semantic_feat=semantic_feat)
            # split batch bbox prediction back to each image
            cls_score = bbox_results['cls_score']
            bbox_pred = bbox_results['bbox_pred']
            num_proposals_per_img = tuple(len(p) for p in proposal_list)
            rois = rois.split(num_proposals_per_img, 0)
            cls_score = cls_score.split(num_proposals_per_img, 0)
            bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
            ms_scores.append(cls_score)

            if i < self.num_stages - 1:
                refine_rois_list = []
                for j in range(num_imgs):
                    if rois[j].shape[0] > 0:
                        bbox_label = cls_score[j][:, :-1].argmax(dim=1)
                        refine_rois = bbox_head.regress_by_class(
                            rois[j], bbox_label, bbox_pred[j], img_metas[j])
                        refine_rois_list.append(refine_rois)
                rois = torch.cat(refine_rois_list)

        # average scores of each image by stages
        cls_score = [
            sum([score[i] for score in ms_scores]) / float(len(ms_scores))
            for i in range(num_imgs)
        ]

        # apply bbox post-processing to each image individually
        det_bboxes = []
        det_labels = []
        for i in range(num_imgs):
            det_bbox, det_label = self.bbox_head[-1].get_bboxes(
                rois[i],
                cls_score[i],
                bbox_pred[i],
                img_shapes[i],
                scale_factors[i],
                rescale=rescale,
                cfg=rcnn_test_cfg)
            det_bboxes.append(det_bbox)
            det_labels.append(det_label)
        bbox_result = [
            bbox2result(det_bboxes[i], det_labels[i],
                        self.bbox_head[-1].num_classes)
            for i in range(num_imgs)
        ]
        ms_bbox_result['ensemble'] = bbox_result

        if self.with_mask:
            if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
                mask_classes = self.mask_head[-1].num_classes
                segm_results = [[[] for _ in range(mask_classes)]
                                for _ in range(num_imgs)]
            else:
                if rescale and not isinstance(scale_factors[0], float):
                    scale_factors = [
                        torch.from_numpy(scale_factor).to(
                            det_bboxes[0].device)
                        for scale_factor in scale_factors
                    ]
                _bboxes = [
                    det_bboxes[i][:, :4] *
                    scale_factors[i] if rescale else det_bboxes[i]
                    for i in range(num_imgs)
                ]
                mask_rois = bbox2roi(_bboxes)
                aug_masks = []
                mask_roi_extractor = self.mask_roi_extractor[-1]
                mask_feats = mask_roi_extractor(
                    x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
                if self.with_semantic and 'mask' in self.semantic_fusion:
                    mask_semantic_feat = self.semantic_roi_extractor(
                        [semantic_feat], mask_rois)
                    mask_feats = mask_feats + mask_semantic_feat
                last_feat = None

                num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes)
                for i in range(self.num_stages):
                    mask_head = self.mask_head[i]
                    if self.mask_info_flow:
                        mask_pred, last_feat = mask_head(
                            mask_feats, last_feat)
                    else:
                        mask_pred = mask_head(mask_feats)

                    # split batch mask prediction back to each image
                    mask_pred = mask_pred.split(num_bbox_per_img, 0)
                    aug_masks.append(
                        [mask.sigmoid().cpu().numpy() for mask in mask_pred])

                # apply mask post-processing to each image individually
                segm_results = []
                for i in range(num_imgs):
                    if det_bboxes[i].shape[0] == 0:
                        segm_results.append(
                            [[]
                             for _ in range(self.mask_head[-1].num_classes)])
                    else:
                        aug_mask = [mask[i] for mask in aug_masks]
                        merged_mask = merge_aug_masks(
                            aug_mask, [[img_metas[i]]] * self.num_stages,
                            rcnn_test_cfg)
                        segm_result = self.mask_head[-1].get_seg_masks(
                            merged_mask, _bboxes[i], det_labels[i],
                            rcnn_test_cfg, ori_shapes[i], scale_factors[i],
                            rescale)
                        segm_results.append(segm_result)
            ms_segm_result['ensemble'] = segm_results

        if self.with_mask:
            results = list(
                zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))
        else:
            results = ms_bbox_result['ensemble']

        return results
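
# --- Illustration (not part of the original file) ---
# A minimal sketch of the per-image stage-score averaging used in simple_test
# above: ms_scores holds, for each stage, a tuple of per-image score tensors,
# and the ensemble score for image i is the mean over stages. Shapes are
# assumptions for the sketch.
import torch

num_stages, num_imgs = 3, 2
ms_scores = [tuple(torch.rand(5, 4) for _ in range(num_imgs))
             for _ in range(num_stages)]
cls_score = [
    sum(score[i] for score in ms_scores) / float(len(ms_scores))
    for i in range(num_imgs)
]
print(cls_score[0].shape)  # torch.Size([5, 4])
# --- end illustration ---
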
    def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):
        """Test with augmentations.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
        if self.with_semantic:
            semantic_feats = [
                self.semantic_head(feat)[1] for feat in img_feats
            ]
        else:
            semantic_feats = [None] * len(img_metas)

        rcnn_test_cfg = self.test_cfg
        aug_bboxes = []
        aug_scores = []
        for x, img_meta, semantic in zip(img_feats, img_metas,
                                         semantic_feats):
            # only one image in the batch
            img_shape = img_meta[0]['img_shape']
            scale_factor = img_meta[0]['scale_factor']
            flip = img_meta[0]['flip']
            flip_direction = img_meta[0]['flip_direction']

            proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
                                     scale_factor, flip, flip_direction)
            # "ms" in variable names means multi-stage
            ms_scores = []

            rois = bbox2roi([proposals])

            if rois.shape[0] == 0:
                # There is no proposal in the single image
                aug_bboxes.append(rois.new_zeros(0, 4))
                aug_scores.append(rois.new_zeros(0, 1))
                continue

            for i in range(self.num_stages):
                bbox_head = self.bbox_head[i]
                bbox_results = self._bbox_forward(
                    i, x, rois, semantic_feat=semantic)
                ms_scores.append(bbox_results['cls_score'])

                if i < self.num_stages - 1:
                    bbox_label = bbox_results['cls_score'].argmax(dim=1)
                    rois = bbox_head.regress_by_class(
                        rois, bbox_label, bbox_results['bbox_pred'],
                        img_meta[0])

            cls_score = sum(ms_scores) / float(len(ms_scores))
            bboxes, scores = self.bbox_head[-1].get_bboxes(
                rois,
                cls_score,
                bbox_results['bbox_pred'],
                img_shape,
                scale_factor,
                rescale=False,
                cfg=None)
            aug_bboxes.append(bboxes)
            aug_scores.append(scores)

        # after merging, bboxes will be rescaled to the original image size
        merged_bboxes, merged_scores = merge_aug_bboxes(
            aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
        det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
                                                rcnn_test_cfg.score_thr,
                                                rcnn_test_cfg.nms,
                                                rcnn_test_cfg.max_per_img)

        bbox_result = bbox2result(det_bboxes, det_labels,
                                  self.bbox_head[-1].num_classes)

        if self.with_mask:
            if det_bboxes.shape[0] == 0:
                segm_result = [[]
                               for _ in range(self.mask_head[-1].num_classes)]
            else:
                aug_masks = []
                aug_img_metas = []
                for x, img_meta, semantic in zip(img_feats, img_metas,
                                                 semantic_feats):
                    img_shape = img_meta[0]['img_shape']
                    scale_factor = img_meta[0]['scale_factor']
                    flip = img_meta[0]['flip']
                    flip_direction = img_meta[0]['flip_direction']
                    _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
                                           scale_factor, flip, flip_direction)
                    mask_rois = bbox2roi([_bboxes])
                    mask_feats = self.mask_roi_extractor[-1](
                        x[:len(self.mask_roi_extractor[-1].featmap_strides)],
                        mask_rois)
                    if self.with_semantic:
                        semantic_feat = semantic
                        mask_semantic_feat = self.semantic_roi_extractor(
                            [semantic_feat], mask_rois)
                        if mask_semantic_feat.shape[-2:] != mask_feats.shape[
                                -2:]:
                            mask_semantic_feat = F.adaptive_avg_pool2d(
                                mask_semantic_feat, mask_feats.shape[-2:])
                        mask_feats = mask_feats + mask_semantic_feat
                    last_feat = None
                    for i in range(self.num_stages):
                        mask_head = self.mask_head[i]
                        if self.mask_info_flow:
                            mask_pred, last_feat = mask_head(
                                mask_feats, last_feat)
                        else:
                            mask_pred = mask_head(mask_feats)
                        aug_masks.append(mask_pred.sigmoid().cpu().numpy())
                        aug_img_metas.append(img_meta)
                merged_masks = merge_aug_masks(aug_masks, aug_img_metas,
                                               self.test_cfg)

                ori_shape = img_metas[0][0]['ori_shape']
                segm_result = self.mask_head[-1].get_seg_masks(
                    merged_masks,
                    det_bboxes,
                    det_labels,
                    rcnn_test_cfg,
                    ori_shape,
                    scale_factor=1.0,
                    rescale=False)
            return [(bbox_result, segm_result)]
        else:
            return [bbox_result]


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/__init__.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from .coarse_mask_head import CoarseMaskHead
from .dynamic_mask_head import DynamicMaskHead
from .fcn_mask_head import FCNMaskHead
from .feature_relay_head import FeatureRelayHead
from .fused_semantic_head import FusedSemanticHead
from .global_context_head import GlobalContextHead
from .grid_head import GridHead
from .htc_mask_head import HTCMaskHead
from .mask_point_head import MaskPointHead
from .maskiou_head import MaskIoUHead
from .scnet_mask_head import SCNetMaskHead
from .scnet_semantic_head import SCNetSemanticHead

__all__ = [
    'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
    'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead', 'SCNetMaskHead',
    'SCNetSemanticHead', 'GlobalContextHead', 'FeatureRelayHead',
    'DynamicMaskHead'
]
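
# --- Illustration (not part of the original file) ---
# A minimal sketch of how the heads exported above are typically built from a
# config dict through the HEADS registry; the config values here are
# assumptions for the sketch, not taken from this repository's configs.
from mmdet.models.builder import HEADS

mask_head_cfg = dict(
    type='FCNMaskHead',
    num_convs=4,
    in_channels=256,
    conv_out_channels=256,
    num_classes=80)
mask_head = HEADS.build(mask_head_cfg)  # returns an FCNMaskHead instance
# --- end illustration ---
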
""" def __init__(self, num_convs=0, num_fcs=2, fc_out_channels=1024, downsample_factor=2, init_cfg=dict( type='Xavier', override=[ dict(name='fcs'), dict(type='Constant', val=0.001, name='fc_logits') ]), *arg, **kwarg): super(CoarseMaskHead, self).__init__( *arg, num_convs=num_convs, upsample_cfg=dict(type=None), init_cfg=None, **kwarg) self.init_cfg = init_cfg self.num_fcs = num_fcs assert self.num_fcs > 0 self.fc_out_channels = fc_out_channels self.downsample_factor = downsample_factor assert self.downsample_factor >= 1 # remove conv_logit delattr(self, 'conv_logits') if downsample_factor > 1: downsample_in_channels = ( self.conv_out_channels if self.num_convs > 0 else self.in_channels) self.downsample_conv = ConvModule( downsample_in_channels, self.conv_out_channels, kernel_size=downsample_factor, stride=downsample_factor, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) else: self.downsample_conv = None self.output_size = (self.roi_feat_size[0] // downsample_factor, self.roi_feat_size[1] // downsample_factor) self.output_area = self.output_size[0] * self.output_size[1] last_layer_dim = self.conv_out_channels * self.output_area self.fcs = ModuleList() for i in range(num_fcs): fc_in_channels = ( last_layer_dim if i == 0 else self.fc_out_channels) self.fcs.append(Linear(fc_in_channels, self.fc_out_channels)) last_layer_dim = self.fc_out_channels output_channels = self.num_classes * self.output_area self.fc_logits = Linear(last_layer_dim, output_channels) def init_weights(self): super(FCNMaskHead, self).init_weights() @auto_fp16() def forward(self, x): for conv in self.convs: x = conv(x) if self.downsample_conv is not None: x = self.downsample_conv(x) x = x.flatten(1) for fc in self.fcs: x = self.relu(fc(x)) mask_pred = self.fc_logits(x).view( x.size(0), self.num_classes, *self.output_size) return mask_pred ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.runner import auto_fp16, force_fp32 from mmdet.core import mask_target from mmdet.models.builder import HEADS from mmdet.models.dense_heads.atss_head import reduce_mean from mmdet.models.utils import build_transformer from .fcn_mask_head import FCNMaskHead @HEADS.register_module() class DynamicMaskHead(FCNMaskHead): r"""Dynamic Mask Head for `Instances as Queries `_ Args: num_convs (int): Number of convolution layer. Defaults to 4. roi_feat_size (int): The output size of RoI extractor, Defaults to 14. in_channels (int): Input feature channels. Defaults to 256. conv_kernel_size (int): Kernel size of convolution layers. Defaults to 3. conv_out_channels (int): Output channels of convolution layers. Defaults to 256. num_classes (int): Number of classes. Defaults to 80 class_agnostic (int): Whether generate class agnostic prediction. Defaults to False. dropout (float): Probability of drop the channel. Defaults to 0.0 upsample_cfg (dict): The config for upsample layer. conv_cfg (dict): The convolution layer config. norm_cfg (dict): The norm layer config. dynamic_conv_cfg (dict): The dynamic convolution layer config. loss_mask (dict): The config for mask loss. 
""" def __init__(self, num_convs=4, roi_feat_size=14, in_channels=256, conv_kernel_size=3, conv_out_channels=256, num_classes=80, class_agnostic=False, upsample_cfg=dict(type='deconv', scale_factor=2), conv_cfg=None, norm_cfg=None, dynamic_conv_cfg=dict( type='DynamicConv', in_channels=256, feat_channels=64, out_channels=256, input_feat_shape=14, with_proj=False, act_cfg=dict(type='ReLU', inplace=True), norm_cfg=dict(type='LN')), loss_mask=dict(type='DiceLoss', loss_weight=8.0), **kwargs): super(DynamicMaskHead, self).__init__( num_convs=num_convs, roi_feat_size=roi_feat_size, in_channels=in_channels, conv_kernel_size=conv_kernel_size, conv_out_channels=conv_out_channels, num_classes=num_classes, class_agnostic=class_agnostic, upsample_cfg=upsample_cfg, conv_cfg=conv_cfg, norm_cfg=norm_cfg, loss_mask=loss_mask, **kwargs) assert class_agnostic is False, \ 'DynamicMaskHead only support class_agnostic=False' self.fp16_enabled = False self.instance_interactive_conv = build_transformer(dynamic_conv_cfg) def init_weights(self): """Use xavier initialization for all weight parameter and set classification head bias as a specific value when use focal loss.""" for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) nn.init.constant_(self.conv_logits.bias, 0.) @auto_fp16() def forward(self, roi_feat, proposal_feat): """Forward function of DynamicMaskHead. Args: roi_feat (Tensor): Roi-pooling features with shape (batch_size*num_proposals, feature_dimensions, pooling_h , pooling_w). proposal_feat (Tensor): Intermediate feature get from diihead in last stage, has shape (batch_size*num_proposals, feature_dimensions) Returns: mask_pred (Tensor): Predicted foreground masks with shape (batch_size*num_proposals, num_classes, pooling_h*2, pooling_w*2). """ proposal_feat = proposal_feat.reshape(-1, self.in_channels) proposal_feat_iic = self.instance_interactive_conv( proposal_feat, roi_feat) x = proposal_feat_iic.permute(0, 2, 1).reshape(roi_feat.size()) for conv in self.convs: x = conv(x) if self.upsample is not None: x = self.upsample(x) if self.upsample_method == 'deconv': x = self.relu(x) mask_pred = self.conv_logits(x) return mask_pred @force_fp32(apply_to=('mask_pred', )) def loss(self, mask_pred, mask_targets, labels): num_pos = labels.new_ones(labels.size()).float().sum() avg_factor = torch.clamp(reduce_mean(num_pos), min=1.).item() loss = dict() if mask_pred.size(0) == 0: loss_mask = mask_pred.sum() else: loss_mask = self.loss_mask( mask_pred[torch.arange(num_pos).long(), labels, ...].sigmoid(), mask_targets, avg_factor=avg_factor) loss['loss_mask'] = loss_mask return loss def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg): pos_proposals = [res.pos_bboxes for res in sampling_results] pos_assigned_gt_inds = [ res.pos_assigned_gt_inds for res in sampling_results ] mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, gt_masks, rcnn_train_cfg) return mask_targets ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from warnings import warn

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, build_conv_layer, build_upsample_layer
from mmcv.ops.carafe import CARAFEPack
from mmcv.runner import BaseModule, ModuleList, auto_fp16, force_fp32
from torch.nn.modules.utils import _pair

from mmdet.core import mask_target
from mmdet.models.builder import HEADS, build_loss

BYTES_PER_FLOAT = 4
# TODO: This memory limit may be too much or too little. It would be better to
# determine it based on available resources.
GPU_MEM_LIMIT = 1024**3  # 1 GB memory limit


@HEADS.register_module()
class FCNMaskHead(BaseModule):

    def __init__(self,
                 num_convs=4,
                 roi_feat_size=14,
                 in_channels=256,
                 conv_kernel_size=3,
                 conv_out_channels=256,
                 num_classes=80,
                 class_agnostic=False,
                 upsample_cfg=dict(type='deconv', scale_factor=2),
                 conv_cfg=None,
                 norm_cfg=None,
                 predictor_cfg=dict(type='Conv'),
                 loss_mask=dict(
                     type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),
                 init_cfg=None):
        assert init_cfg is None, 'To prevent abnormal initialization ' \
                                 'behavior, init_cfg is not allowed to be set'
        super(FCNMaskHead, self).__init__(init_cfg)
        self.upsample_cfg = upsample_cfg.copy()
        if self.upsample_cfg['type'] not in [
                None, 'deconv', 'nearest', 'bilinear', 'carafe'
        ]:
            raise ValueError(
                f'Invalid upsample method {self.upsample_cfg["type"]}, '
                'accepted methods are "deconv", "nearest", "bilinear", '
                '"carafe"')
        self.num_convs = num_convs
        # WARN: roi_feat_size is reserved and not used
        self.roi_feat_size = _pair(roi_feat_size)
        self.in_channels = in_channels
        self.conv_kernel_size = conv_kernel_size
        self.conv_out_channels = conv_out_channels
        self.upsample_method = self.upsample_cfg.get('type')
        self.scale_factor = self.upsample_cfg.pop('scale_factor', None)
        self.num_classes = num_classes
        self.class_agnostic = class_agnostic
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.predictor_cfg = predictor_cfg
        self.fp16_enabled = False
        self.loss_mask = build_loss(loss_mask)

        self.convs = ModuleList()
        for i in range(self.num_convs):
            in_channels = (
                self.in_channels if i == 0 else self.conv_out_channels)
            padding = (self.conv_kernel_size - 1) // 2
            self.convs.append(
                ConvModule(
                    in_channels,
                    self.conv_out_channels,
                    self.conv_kernel_size,
                    padding=padding,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg))
        upsample_in_channels = (
            self.conv_out_channels if self.num_convs > 0 else in_channels)
        upsample_cfg_ = self.upsample_cfg.copy()
        if self.upsample_method is None:
            self.upsample = None
        elif self.upsample_method == 'deconv':
            upsample_cfg_.update(
                in_channels=upsample_in_channels,
                out_channels=self.conv_out_channels,
                kernel_size=self.scale_factor,
                stride=self.scale_factor)
            self.upsample = build_upsample_layer(upsample_cfg_)
        elif self.upsample_method == 'carafe':
            upsample_cfg_.update(
                channels=upsample_in_channels, scale_factor=self.scale_factor)
            self.upsample = build_upsample_layer(upsample_cfg_)
        else:
            # suppress warnings
            align_corners = (None
                             if self.upsample_method == 'nearest' else False)
            upsample_cfg_.update(
                scale_factor=self.scale_factor,
                mode=self.upsample_method,
                align_corners=align_corners)
            self.upsample = build_upsample_layer(upsample_cfg_)

        out_channels = 1 if self.class_agnostic else self.num_classes
        logits_in_channel = (
            self.conv_out_channels
            if self.upsample_method == 'deconv' else upsample_in_channels)
        self.conv_logits = build_conv_layer(self.predictor_cfg,
                                            logits_in_channel, out_channels,
                                            1)
        self.relu = nn.ReLU(inplace=True)
        self.debug_imgs = None

    def init_weights(self):
        super(FCNMaskHead, self).init_weights()
        for m in [self.upsample, self.conv_logits]:
            if m is None:
                continue
            elif isinstance(m, CARAFEPack):
                m.init_weights()
            elif hasattr(m, 'weight') and hasattr(m, 'bias'):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu')
                nn.init.constant_(m.bias, 0)

    @auto_fp16()
    def forward(self, x):
        for conv in self.convs:
            x = conv(x)
        if self.upsample is not None:
            x = self.upsample(x)
            if self.upsample_method == 'deconv':
                x = self.relu(x)
        mask_pred = self.conv_logits(x)
        return mask_pred

    def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
        pos_proposals = [res.pos_bboxes for res in sampling_results]
        pos_assigned_gt_inds = [
            res.pos_assigned_gt_inds for res in sampling_results
        ]
        mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
                                   gt_masks, rcnn_train_cfg)
        return mask_targets

    @force_fp32(apply_to=('mask_pred', ))
    def loss(self, mask_pred, mask_targets, labels):
        """
        Example:
            >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import *  # NOQA
            >>> N = 7  # N = number of extracted ROIs
            >>> C, H, W = 11, 32, 32
            >>> # Create example instance of FCN Mask Head.
            >>> # There are lots of variations depending on the configuration
            >>> self = FCNMaskHead(num_classes=C, num_convs=1)
            >>> inputs = torch.rand(N, self.in_channels, H, W)
            >>> mask_pred = self.forward(inputs)
            >>> sf = self.scale_factor
            >>> labels = torch.randint(0, C, size=(N,))
            >>> # With the default properties the mask targets should indicate
            >>> # a (potentially soft) single-class label
            >>> mask_targets = torch.rand(N, H * sf, W * sf)
            >>> loss = self.loss(mask_pred, mask_targets, labels)
            >>> print('loss = {!r}'.format(loss))
        """
        loss = dict()
        if mask_pred.size(0) == 0:
            loss_mask = mask_pred.sum()
        else:
            if self.class_agnostic:
                loss_mask = self.loss_mask(mask_pred, mask_targets,
                                           torch.zeros_like(labels))
            else:
                loss_mask = self.loss_mask(mask_pred, mask_targets, labels)
        loss['loss_mask'] = loss_mask
        return loss
    def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
                      ori_shape, scale_factor, rescale):
        """Get segmentation masks from mask_pred and bboxes.

        Args:
            mask_pred (Tensor or ndarray): shape (n, #class, h, w).
                For single-scale testing, mask_pred is the direct output of
                model, whose type is Tensor, while for multi-scale testing,
                it will be converted to numpy array outside of this method.
            det_bboxes (Tensor): shape (n, 4/5)
            det_labels (Tensor): shape (n, )
            rcnn_test_cfg (dict): rcnn testing config
            ori_shape (Tuple): original image height and width, shape (2,)
            scale_factor(ndarray | Tensor): If ``rescale is True``, box
                coordinates are divided by this scale factor to fit
                ``ori_shape``.
            rescale (bool): If True, the resulting masks will be rescaled to
                ``ori_shape``.

        Returns:
            list[list]: encoded masks. The c-th item in the outer list
                corresponds to the c-th class. Given the c-th outer list, the
                i-th item in that inner list is the mask for the i-th box with
                class label c.

        Example:
            >>> import mmcv
            >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import *  # NOQA
            >>> N = 7  # N = number of extracted ROIs
            >>> C, H, W = 11, 32, 32
            >>> # Create example instance of FCN Mask Head.
            >>> self = FCNMaskHead(num_classes=C, num_convs=0)
            >>> inputs = torch.rand(N, self.in_channels, H, W)
            >>> mask_pred = self.forward(inputs)
            >>> # Each input is associated with some bounding box
            >>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N)
            >>> det_labels = torch.randint(0, C, size=(N,))
            >>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, })
            >>> ori_shape = (H * 4, W * 4)
            >>> scale_factor = torch.FloatTensor((1, 1))
            >>> rescale = False
            >>> # Encoded masks are a list for each category.
            >>> encoded_masks = self.get_seg_masks(
            >>>     mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
            >>>     ori_shape, scale_factor, rescale
            >>> )
            >>> assert len(encoded_masks) == C
            >>> assert sum(list(map(len, encoded_masks))) == N
        """
        if isinstance(mask_pred, torch.Tensor):
            mask_pred = mask_pred.sigmoid()
        else:
            # In AugTest, has been activated before
            mask_pred = det_bboxes.new_tensor(mask_pred)

        device = mask_pred.device
        cls_segms = [[] for _ in range(self.num_classes)
                     ]  # BG is not included in num_classes
        bboxes = det_bboxes[:, :4]
        labels = det_labels

        # In most cases, scale_factor should have been
        # converted to Tensor when rescale the bbox
        if not isinstance(scale_factor, torch.Tensor):
            if isinstance(scale_factor, float):
                scale_factor = np.array([scale_factor] * 4)
                warn('Scale_factor should be a Tensor or ndarray '
                     'with shape (4,), float would be deprecated. ')
            assert isinstance(scale_factor, np.ndarray)
            scale_factor = torch.Tensor(scale_factor)

        if rescale:
            img_h, img_w = ori_shape[:2]
            bboxes = bboxes / scale_factor.to(bboxes)
        else:
            w_scale, h_scale = scale_factor[0], scale_factor[1]
            img_h = np.round(ori_shape[0] * h_scale.item()).astype(np.int32)
            img_w = np.round(ori_shape[1] * w_scale.item()).astype(np.int32)

        N = len(mask_pred)
        # The actual implementation split the input into chunks,
        # and paste them chunk by chunk.
        if device.type == 'cpu':
            # CPU is most efficient when they are pasted one by one with
            # skip_empty=True, so that it performs minimal number of
            # operations.
            num_chunks = N
        else:
            # GPU benefits from parallelism for larger chunks,
            # but may have memory issue
            # the types of img_w and img_h are np.int32,
            # when the image resolution is large,
            # the calculation of num_chunks will overflow.
            # so we need to change the types of img_w and img_h to int.
            # See https://github.com/open-mmlab/mmdetection/pull/5191
            num_chunks = int(
                np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT /
                        GPU_MEM_LIMIT))
            assert (num_chunks <=
                    N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
        chunks = torch.chunk(torch.arange(N, device=device), num_chunks)

        threshold = rcnn_test_cfg.mask_thr_binary
        im_mask = torch.zeros(
            N,
            img_h,
            img_w,
            device=device,
            dtype=torch.bool if threshold >= 0 else torch.uint8)

        if not self.class_agnostic:
            mask_pred = mask_pred[range(N), labels][:, None]

        for inds in chunks:
            masks_chunk, spatial_inds = _do_paste_mask(
                mask_pred[inds],
                bboxes[inds],
                img_h,
                img_w,
                skip_empty=device.type == 'cpu')

            if threshold >= 0:
                masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
            else:
                # for visualization and debugging
                masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)

            im_mask[(inds, ) + spatial_inds] = masks_chunk

        for i in range(N):
            cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy())
        return cls_segms
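
# --- Illustration (not part of the original file) ---
# A minimal arithmetic sketch of the GPU chunking above: with the 1 GB limit
# and 4 bytes per float, pasting 100 masks onto a 2000x3000 image needs
# ceil(100 * 2000 * 3000 * 4 / 2**30) = 3 chunks. Values are assumptions.
import numpy as np

BYTES_PER_FLOAT = 4
GPU_MEM_LIMIT = 1024**3
N, img_h, img_w = 100, 2000, 3000
num_chunks = int(np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
print(num_chunks)  # 3
# --- end illustration ---
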
    def onnx_export(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
                    ori_shape, **kwargs):
        """Get segmentation masks from mask_pred and bboxes.

        Args:
            mask_pred (Tensor): shape (n, #class, h, w).
            det_bboxes (Tensor): shape (n, 4/5)
            det_labels (Tensor): shape (n, )
            rcnn_test_cfg (dict): rcnn testing config
            ori_shape (Tuple): original image height and width, shape (2,)

        Returns:
            Tensor: a mask of shape (N, img_h, img_w).
        """
        mask_pred = mask_pred.sigmoid()
        bboxes = det_bboxes[:, :4]
        labels = det_labels
        # No need to consider rescale and scale_factor while exporting to ONNX
        img_h, img_w = ori_shape[:2]
        threshold = rcnn_test_cfg.mask_thr_binary
        if not self.class_agnostic:
            box_inds = torch.arange(mask_pred.shape[0])
            mask_pred = mask_pred[box_inds, labels][:, None]
        masks, _ = _do_paste_mask(
            mask_pred, bboxes, img_h, img_w, skip_empty=False)
        if threshold >= 0:
            # should convert to float to avoid problems in TRT
            masks = (masks >= threshold).to(dtype=torch.float)
        return masks


def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
    """Paste instance masks according to boxes.

    This implementation is modified from
    https://github.com/facebookresearch/detectron2/

    Args:
        masks (Tensor): N, 1, H, W
        boxes (Tensor): N, 4
        img_h (int): Height of the image to be pasted.
        img_w (int): Width of the image to be pasted.
        skip_empty (bool): Only paste masks within the region that
            tightly bound all boxes, and returns the results this region only.
            An important optimization for CPU.

    Returns:
        tuple: (Tensor, tuple). The first item is mask tensor, the second one
            is the slice object.
        If skip_empty == False, the whole image will be pasted. It will
            return a mask of shape (N, img_h, img_w) and an empty tuple.
        If skip_empty == True, only area around the mask will be pasted.
            A mask of shape (N, h', w') and its start and end coordinates
            in the original image will be returned.
    """
    # On GPU, paste all masks together (up to chunk size)
    # by using the entire image to sample the masks
    # Compared to pasting them one by one,
    # this has more operations but is faster on COCO-scale dataset.
    device = masks.device
    if skip_empty:
        x0_int, y0_int = torch.clamp(
            boxes.min(dim=0).values.floor()[:2] - 1,
            min=0).to(dtype=torch.int32)
        x1_int = torch.clamp(
            boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
        y1_int = torch.clamp(
            boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
    else:
        x0_int, y0_int = 0, 0
        x1_int, y1_int = img_w, img_h
    x0, y0, x1, y1 = torch.split(boxes, 1, dim=1)  # each is Nx1

    N = masks.shape[0]

    img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5
    img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5
    img_y = (img_y - y0) / (y1 - y0) * 2 - 1
    img_x = (img_x - x0) / (x1 - x0) * 2 - 1
    # img_x, img_y have shapes (N, w), (N, h)
    # IsInf op is not supported with ONNX<=1.7.0
    if not torch.onnx.is_in_onnx_export():
        if torch.isinf(img_x).any():
            inds = torch.where(torch.isinf(img_x))
            img_x[inds] = 0
        if torch.isinf(img_y).any():
            inds = torch.where(torch.isinf(img_y))
            img_y[inds] = 0

    gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
    gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
    grid = torch.stack([gx, gy], dim=3)

    img_masks = F.grid_sample(
        masks.to(dtype=torch.float32), grid, align_corners=False)

    if skip_empty:
        return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
    else:
        return img_masks[:, 0], ()
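
# --- Illustration (not part of the original file) ---
# A minimal sketch of the normalized-coordinate mapping used above: pixel
# centers inside a box are mapped to [-1, 1] so F.grid_sample can resample a
# small mask into the box region. Sizes below are assumptions for the sketch.
import torch

x0, x1 = 10.0, 30.0                        # box extent along x
img_x = torch.arange(10, 30).float() + 0.5  # pixel centers in the box
norm_x = (img_x - x0) / (x1 - x0) * 2 - 1   # mapped into [-1, 1]
print(norm_x.min().item(), norm_x.max().item())  # -0.95 0.95
# --- end illustration ---
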
================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/feature_relay_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16

from mmdet.models.builder import HEADS


@HEADS.register_module()
class FeatureRelayHead(BaseModule):
    """Feature Relay Head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.

    Args:
        in_channels (int, optional): number of input channels. Default: 256.
        conv_out_channels (int, optional): number of output channels before
            classification layer. Default: 256.
        roi_feat_size (int, optional): roi feat size at box head. Default: 7.
        scale_factor (int, optional): scale factor to match roi feat size
            at mask head. Default: 2.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels=1024,
                 out_conv_channels=256,
                 roi_feat_size=7,
                 scale_factor=2,
                 init_cfg=dict(type='Kaiming', layer='Linear')):
        super(FeatureRelayHead, self).__init__(init_cfg)
        assert isinstance(roi_feat_size, int)

        self.in_channels = in_channels
        self.out_conv_channels = out_conv_channels
        self.roi_feat_size = roi_feat_size
        self.out_channels = (roi_feat_size**2) * out_conv_channels
        self.scale_factor = scale_factor
        self.fp16_enabled = False

        self.fc = nn.Linear(self.in_channels, self.out_channels)
        self.upsample = nn.Upsample(
            scale_factor=scale_factor, mode='bilinear', align_corners=True)

    @auto_fp16()
    def forward(self, x):
        """Forward function."""
        N, in_C = x.shape
        if N > 0:
            out_C = self.out_conv_channels
            out_HW = self.roi_feat_size
            x = self.fc(x)
            x = x.reshape(N, out_C, out_HW, out_HW)
            x = self.upsample(x)
            return x
        return None
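
# --- Illustration (not part of the original file) ---
# A minimal shape sketch for the relay above with its defaults: a flat
# (N, 1024) box feature becomes a (N, 256, 7, 7) map, then bilinear
# upsampling by 2 yields (N, 256, 14, 14) to match the mask branch.
# The batch size is an assumption for the sketch.
import torch
from mmdet.models.roi_heads.mask_heads import FeatureRelayHead

head = FeatureRelayHead()
out = head(torch.rand(3, 1024))
print(out.shape)  # torch.Size([3, 256, 14, 14])
# --- end illustration ---
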
================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32

from mmdet.models.builder import HEADS, build_loss


@HEADS.register_module()
class FusedSemanticHead(BaseModule):
    r"""Multi-level fused semantic segmentation head.

    .. code-block:: none

        in_1 -> 1x1 conv ---
                            |
        in_2 -> 1x1 conv -- |
                           ||
        in_3 -> 1x1 conv - ||
                          |||                  /-> 1x1 conv (mask prediction)
        in_4 -> 1x1 conv -----> 3x3 convs (*4)
                            |                  \-> 1x1 conv (feature)
        in_5 -> 1x1 conv ---
    """  # noqa: W605

    def __init__(self,
                 num_ins,
                 fusion_level,
                 num_convs=4,
                 in_channels=256,
                 conv_out_channels=256,
                 num_classes=183,
                 conv_cfg=None,
                 norm_cfg=None,
                 ignore_label=None,
                 loss_weight=None,
                 loss_seg=dict(
                     type='CrossEntropyLoss',
                     ignore_index=255,
                     loss_weight=0.2),
                 init_cfg=dict(
                     type='Kaiming', override=dict(name='conv_logits'))):
        super(FusedSemanticHead, self).__init__(init_cfg)
        self.num_ins = num_ins
        self.fusion_level = fusion_level
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.num_classes = num_classes
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False

        self.lateral_convs = nn.ModuleList()
        for i in range(self.num_ins):
            self.lateral_convs.append(
                ConvModule(
                    self.in_channels,
                    self.in_channels,
                    1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    inplace=False))

        self.convs = nn.ModuleList()
        for i in range(self.num_convs):
            in_channels = self.in_channels if i == 0 else conv_out_channels
            self.convs.append(
                ConvModule(
                    in_channels,
                    conv_out_channels,
                    3,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
        self.conv_embedding = ConvModule(
            conv_out_channels,
            conv_out_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg)
        self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
        if ignore_label:
            loss_seg['ignore_index'] = ignore_label
        if loss_weight:
            loss_seg['loss_weight'] = loss_weight
        if ignore_label or loss_weight:
            warnings.warn('``ignore_label`` and ``loss_weight`` would be '
                          'deprecated soon. Please set ``ignore_index`` and '
                          '``loss_weight`` in ``loss_seg`` instead.')
        self.criterion = build_loss(loss_seg)

    @auto_fp16()
    def forward(self, feats):
        x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
        fused_size = tuple(x.shape[-2:])
        for i, feat in enumerate(feats):
            if i != self.fusion_level:
                feat = F.interpolate(
                    feat,
                    size=fused_size,
                    mode='bilinear',
                    align_corners=True)
                # fix runtime error of "+=" inplace operation in PyTorch 1.10
                x = x + self.lateral_convs[i](feat)

        for i in range(self.num_convs):
            x = self.convs[i](x)

        mask_pred = self.conv_logits(x)
        x = self.conv_embedding(x)
        return mask_pred, x

    @force_fp32(apply_to=('mask_pred', ))
    def loss(self, mask_pred, labels):
        labels = labels.squeeze(1).long()
        loss_semantic_seg = self.criterion(mask_pred, labels)
        return loss_semantic_seg
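
# --- Illustration (not part of the original file) ---
# A minimal sketch of the fusion step above: every level is resized to the
# fusion level's spatial size and summed. Level shapes are assumptions.
import torch
import torch.nn.functional as F

feats = [torch.rand(1, 8, s, s) for s in (64, 32, 16, 8)]
fusion_level = 1
x = feats[fusion_level]
for i, feat in enumerate(feats):
    if i != fusion_level:
        x = x + F.interpolate(
            feat, size=x.shape[-2:], mode='bilinear', align_corners=True)
print(x.shape)  # torch.Size([1, 8, 32, 32])
# --- end illustration ---
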
================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/global_context_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32

from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock


@HEADS.register_module()
class GlobalContextHead(BaseModule):
    """Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.

    Args:
        num_convs (int, optional): number of convolutional layer in GlbCtxHead.
            Default: 4.
        in_channels (int, optional): number of input channels. Default: 256.
        conv_out_channels (int, optional): number of output channels before
            classification layer. Default: 256.
        num_classes (int, optional): number of classes. Default: 80.
        loss_weight (float, optional): global context loss weight. Default: 1.
        conv_cfg (dict, optional): config to init conv layer. Default: None.
        norm_cfg (dict, optional): config to init norm layer. Default: None.
        conv_to_res (bool, optional): if True, 2 convs will be grouped into
            1 `SimplifiedBasicBlock` using a skip connection. Default: False.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 num_convs=4,
                 in_channels=256,
                 conv_out_channels=256,
                 num_classes=80,
                 loss_weight=1.0,
                 conv_cfg=None,
                 norm_cfg=None,
                 conv_to_res=False,
                 init_cfg=dict(
                     type='Normal', std=0.01, override=dict(name='fc'))):
        super(GlobalContextHead, self).__init__(init_cfg)
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.num_classes = num_classes
        self.loss_weight = loss_weight
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.conv_to_res = conv_to_res
        self.fp16_enabled = False

        if self.conv_to_res:
            num_res_blocks = num_convs // 2
            self.convs = ResLayer(
                SimplifiedBasicBlock,
                in_channels,
                self.conv_out_channels,
                num_res_blocks,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)
            self.num_convs = num_res_blocks
        else:
            self.convs = nn.ModuleList()
            for i in range(self.num_convs):
                in_channels = self.in_channels if i == 0 else conv_out_channels
                self.convs.append(
                    ConvModule(
                        in_channels,
                        conv_out_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))

        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(conv_out_channels, num_classes)

        self.criterion = nn.BCEWithLogitsLoss()

    @auto_fp16()
    def forward(self, feats):
        """Forward function."""
        x = feats[-1]
        for i in range(self.num_convs):
            x = self.convs[i](x)
        x = self.pool(x)

        # multi-class prediction
        mc_pred = x.reshape(x.size(0), -1)
        mc_pred = self.fc(mc_pred)

        return mc_pred, x

    @force_fp32(apply_to=('pred', ))
    def loss(self, pred, labels):
        """Loss function."""
        labels = [lbl.unique() for lbl in labels]
        targets = pred.new_zeros(pred.size())
        for i, label in enumerate(labels):
            targets[i, label] = 1.0
        loss = self.loss_weight * self.criterion(pred, targets)
        return loss
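
# --- Illustration (not part of the original file) ---
# A minimal sketch of the multi-label target built in the loss above: each
# image's target row is 1.0 at every class that appears in its ground truth.
# Class counts and labels are assumptions for the sketch.
import torch

num_classes = 5
labels = [torch.tensor([0, 2, 2]), torch.tensor([4])]  # per-image gt labels
targets = torch.zeros(len(labels), num_classes)
for i, label in enumerate(labels):
    targets[i, label.unique()] = 1.0
print(targets)
# tensor([[1., 0., 1., 0., 0.],
#         [0., 0., 0., 0., 1.]])
# --- end illustration ---
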
================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/grid_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule

from mmdet.models.builder import HEADS, build_loss


@HEADS.register_module()
class GridHead(BaseModule):

    def __init__(self,
                 grid_points=9,
                 num_convs=8,
                 roi_feat_size=14,
                 in_channels=256,
                 conv_kernel_size=3,
                 point_feat_channels=64,
                 deconv_kernel_size=4,
                 class_agnostic=False,
                 loss_grid=dict(
                     type='CrossEntropyLoss', use_sigmoid=True,
                     loss_weight=15),
                 conv_cfg=None,
                 norm_cfg=dict(type='GN', num_groups=36),
                 init_cfg=[
                     dict(type='Kaiming', layer=['Conv2d', 'Linear']),
                     dict(
                         type='Normal',
                         layer='ConvTranspose2d',
                         std=0.001,
                         override=dict(
                             type='Normal',
                             name='deconv2',
                             std=0.001,
                             bias=-np.log(0.99 / 0.01)))
                 ]):
        super(GridHead, self).__init__(init_cfg)
        self.grid_points = grid_points
        self.num_convs = num_convs
        self.roi_feat_size = roi_feat_size
        self.in_channels = in_channels
        self.conv_kernel_size = conv_kernel_size
        self.point_feat_channels = point_feat_channels
        self.conv_out_channels = self.point_feat_channels * self.grid_points
        self.class_agnostic = class_agnostic
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN':
            assert self.conv_out_channels % norm_cfg['num_groups'] == 0

        assert self.grid_points >= 4
        self.grid_size = int(np.sqrt(self.grid_points))
        if self.grid_size * self.grid_size != self.grid_points:
            raise ValueError('grid_points must be a square number')

        # the predicted heatmap is half of whole_map_size
        if not isinstance(self.roi_feat_size, int):
            raise ValueError('Only square RoIs are supported in Grid R-CNN')
        self.whole_map_size = self.roi_feat_size * 4

        # compute point-wise sub-regions
        self.sub_regions = self.calc_sub_regions()

        self.convs = []
        for i in range(self.num_convs):
            in_channels = (
                self.in_channels if i == 0 else self.conv_out_channels)
            stride = 2 if i == 0 else 1
            padding = (self.conv_kernel_size - 1) // 2
            self.convs.append(
                ConvModule(
                    in_channels,
                    self.conv_out_channels,
                    self.conv_kernel_size,
                    stride=stride,
                    padding=padding,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=True))
        self.convs = nn.Sequential(*self.convs)

        self.deconv1 = nn.ConvTranspose2d(
            self.conv_out_channels,
            self.conv_out_channels,
            kernel_size=deconv_kernel_size,
            stride=2,
            padding=(deconv_kernel_size - 2) // 2,
            groups=grid_points)
        self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels)
        self.deconv2 = nn.ConvTranspose2d(
            self.conv_out_channels,
            grid_points,
            kernel_size=deconv_kernel_size,
            stride=2,
            padding=(deconv_kernel_size - 2) // 2,
            groups=grid_points)

        # find the 4-neighbor of each grid point
        self.neighbor_points = []
        grid_size = self.grid_size
        for i in range(grid_size):  # i-th column
            for j in range(grid_size):  # j-th row
                neighbors = []
                if i > 0:  # left: (i - 1, j)
                    neighbors.append((i - 1) * grid_size + j)
                if j > 0:  # up: (i, j - 1)
                    neighbors.append(i * grid_size + j - 1)
                if j < grid_size - 1:  # down: (i, j + 1)
                    neighbors.append(i * grid_size + j + 1)
                if i < grid_size - 1:  # right: (i + 1, j)
                    neighbors.append((i + 1) * grid_size + j)
                self.neighbor_points.append(tuple(neighbors))
        # total edges in the grid
        self.num_edges = sum([len(p) for p in self.neighbor_points])

        self.forder_trans = nn.ModuleList()  # first-order feature transition
        self.sorder_trans = nn.ModuleList()  # second-order feature transition
        for neighbors in self.neighbor_points:
            fo_trans = nn.ModuleList()
            so_trans = nn.ModuleList()
            for _ in range(len(neighbors)):
                # each transition module consists of a 5x5 depth-wise conv and
                # a 1x1 conv.
                fo_trans.append(
                    nn.Sequential(
                        nn.Conv2d(
                            self.point_feat_channels,
                            self.point_feat_channels,
                            5,
                            stride=1,
                            padding=2,
                            groups=self.point_feat_channels),
                        nn.Conv2d(self.point_feat_channels,
                                  self.point_feat_channels, 1)))
                so_trans.append(
                    nn.Sequential(
                        nn.Conv2d(
                            self.point_feat_channels,
                            self.point_feat_channels,
                            5,
                            1,
                            2,
                            groups=self.point_feat_channels),
                        nn.Conv2d(self.point_feat_channels,
                                  self.point_feat_channels, 1)))
            self.forder_trans.append(fo_trans)
            self.sorder_trans.append(so_trans)

        self.loss_grid = build_loss(loss_grid)

    def forward(self, x):
        assert x.shape[-1] == x.shape[-2] == self.roi_feat_size
        # RoI feature transformation, downsample 2x
        x = self.convs(x)

        c = self.point_feat_channels
        # first-order fusion
        x_fo = [None for _ in range(self.grid_points)]
        for i, points in enumerate(self.neighbor_points):
            x_fo[i] = x[:, i * c:(i + 1) * c]
            for j, point_idx in enumerate(points):
                x_fo[i] = x_fo[i] + self.forder_trans[i][j](
                    x[:, point_idx * c:(point_idx + 1) * c])

        # second-order fusion
        x_so = [None for _ in range(self.grid_points)]
        for i, points in enumerate(self.neighbor_points):
            x_so[i] = x[:, i * c:(i + 1) * c]
            for j, point_idx in enumerate(points):
                x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx])

        # predicted heatmap with fused features
        x2 = torch.cat(x_so, dim=1)
        x2 = self.deconv1(x2)
        x2 = F.relu(self.norm1(x2), inplace=True)
        heatmap = self.deconv2(x2)

        # predicted heatmap with original features (applicable during training)
        if self.training:
            x1 = x
            x1 = self.deconv1(x1)
            x1 = F.relu(self.norm1(x1), inplace=True)
            heatmap_unfused = self.deconv2(x1)
        else:
            heatmap_unfused = heatmap

        return dict(fused=heatmap, unfused=heatmap_unfused)

    def calc_sub_regions(self):
        """Compute point specific representation regions.

        See Grid R-CNN Plus (https://arxiv.org/abs/1906.05688) for details.
        """
        # to make it consistent with the original implementation, half_size
        # is computed as 2 * quarter_size, which is smaller
        half_size = self.whole_map_size // 4 * 2
        sub_regions = []
        for i in range(self.grid_points):
            x_idx = i // self.grid_size
            y_idx = i % self.grid_size
            if x_idx == 0:
                sub_x1 = 0
            elif x_idx == self.grid_size - 1:
                sub_x1 = half_size
            else:
                ratio = x_idx / (self.grid_size - 1) - 0.25
                sub_x1 = max(int(ratio * self.whole_map_size), 0)
            if y_idx == 0:
                sub_y1 = 0
            elif y_idx == self.grid_size - 1:
                sub_y1 = half_size
            else:
                ratio = y_idx / (self.grid_size - 1) - 0.25
                sub_y1 = max(int(ratio * self.whole_map_size), 0)
            sub_regions.append(
                (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size))
        return sub_regions

    def get_targets(self, sampling_results, rcnn_train_cfg):
        # mix all samples (across images) together.
        pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results],
                               dim=0).cpu()
        pos_gt_bboxes = torch.cat(
            [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu()
        assert pos_bboxes.shape == pos_gt_bboxes.shape

        # expand pos_bboxes to 2x of original size
        x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
        y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
        x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
        y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
        pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
        pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1)
        pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1)

        num_rois = pos_bboxes.shape[0]
        map_size = self.whole_map_size
        # this is not the final target shape
        targets = torch.zeros((num_rois, self.grid_points, map_size,
                               map_size),
                              dtype=torch.float)

        # pre-compute interpolation factors for all grid points.
        # the first item is the factor of x-dim, and the second is y-dim.
        # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1)
        factors = []
        for j in range(self.grid_points):
            x_idx = j // self.grid_size
            y_idx = j % self.grid_size
            factors.append((1 - x_idx / (self.grid_size - 1),
                            1 - y_idx / (self.grid_size - 1)))

        radius = rcnn_train_cfg.pos_radius
        radius2 = radius**2
        for i in range(num_rois):
            # ignore small bboxes
            if (pos_bbox_ws[i] <= self.grid_size
                    or pos_bbox_hs[i] <= self.grid_size):
                continue
            # for each grid point, mark a small circle as positive
            for j in range(self.grid_points):
                factor_x, factor_y = factors[j]
                gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + (
                    1 - factor_x) * pos_gt_bboxes[i, 2]
                gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + (
                    1 - factor_y) * pos_gt_bboxes[i, 3]

                cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] *
                         map_size)
                cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] *
                         map_size)

                for x in range(cx - radius, cx + radius + 1):
                    for y in range(cy - radius, cy + radius + 1):
                        if x >= 0 and x < map_size and y >= 0 and y < map_size:
                            if (x - cx)**2 + (y - cy)**2 <= radius2:
                                targets[i, j, y, x] = 1

        # reduce the target heatmap size by a half
        # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688).
        sub_targets = []
        for i in range(self.grid_points):
            sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i]
            sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2])
        sub_targets = torch.cat(sub_targets, dim=1)
        sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device)
        return sub_targets

    def loss(self, grid_pred, grid_targets):
        loss_fused = self.loss_grid(grid_pred['fused'], grid_targets)
        loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets)
        loss_grid = loss_fused + loss_unfused
        return dict(loss_grid=loss_grid)

    def get_bboxes(self, det_bboxes, grid_pred, img_metas):
        # TODO: refactoring
        assert det_bboxes.shape[0] == grid_pred.shape[0]
        det_bboxes = det_bboxes.cpu()
        cls_scores = det_bboxes[:, [4]]
        det_bboxes = det_bboxes[:, :4]
        grid_pred = grid_pred.sigmoid().cpu()

        R, c, h, w = grid_pred.shape
        half_size = self.whole_map_size // 4 * 2
        assert h == w == half_size
        assert c == self.grid_points

        # find the point with max scores in the half-sized heatmap
        grid_pred = grid_pred.view(R * c, h * w)
        pred_scores, pred_position = grid_pred.max(dim=1)
        xs = pred_position % w
        ys = pred_position // w

        # get the position in the whole heatmap instead of half-sized heatmap
        for i in range(self.grid_points):
            xs[i::self.grid_points] += self.sub_regions[i][0]
            ys[i::self.grid_points] += self.sub_regions[i][1]

        # reshape to (num_rois, grid_points)
        pred_scores, xs, ys = tuple(
            map(lambda x: x.view(R, c), [pred_scores, xs, ys]))

        # get expanded pos_bboxes
        widths = (det_bboxes[:, 2] - det_bboxes[:, 0]).unsqueeze(-1)
        heights = (det_bboxes[:, 3] - det_bboxes[:, 1]).unsqueeze(-1)
        x1 = (det_bboxes[:, 0, None] - widths / 2)
        y1 = (det_bboxes[:, 1, None] - heights / 2)
        # map the grid point to the absolute coordinates
        abs_xs = (xs.float() + 0.5) / w * widths + x1
        abs_ys = (ys.float() + 0.5) / h * heights + y1

        # get the grid points indices that fall on the bbox boundaries
        x1_inds = [i for i in range(self.grid_size)]
        y1_inds = [i * self.grid_size for i in range(self.grid_size)]
        x2_inds = [
            self.grid_points - self.grid_size + i
            for i in range(self.grid_size)
        ]
        y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)]

        # voting of all grid points on some boundary
        bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum(
            dim=1, keepdim=True) / (
                pred_scores[:, x1_inds].sum(dim=1, keepdim=True))
        bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum(
            dim=1, keepdim=True) / (
                pred_scores[:, y1_inds].sum(dim=1, keepdim=True))
        bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum(
            dim=1, keepdim=True) / (
                pred_scores[:, x2_inds].sum(dim=1, keepdim=True))
        bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum(
            dim=1, keepdim=True) / (
                pred_scores[:, y2_inds].sum(dim=1, keepdim=True))

        bbox_res = torch.cat(
            [bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2, cls_scores], dim=1)
        bbox_res[:, [0, 2]].clamp_(min=0, max=img_metas[0]['img_shape'][1])
        bbox_res[:, [1, 3]].clamp_(min=0, max=img_metas[0]['img_shape'][0])

        return bbox_res
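
# --- Illustration (not part of the original file) ---
# A minimal sketch of the 4-neighbor indexing built in GridHead.__init__
# above, for the default 3x3 grid (grid_points=9): point k sits at column
# k // 3 and row k % 3, so the center point (index 4) has four neighbors.
grid_size = 3
neighbor_points = []
for i in range(grid_size):      # column
    for j in range(grid_size):  # row
        neighbors = []
        if i > 0:
            neighbors.append((i - 1) * grid_size + j)
        if j > 0:
            neighbors.append(i * grid_size + j - 1)
        if j < grid_size - 1:
            neighbors.append(i * grid_size + j + 1)
        if i < grid_size - 1:
            neighbors.append((i + 1) * grid_size + j)
        neighbor_points.append(tuple(neighbors))
print(neighbor_points[4])  # center point: (1, 3, 5, 7)
# --- end illustration ---
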
================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/htc_mask_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule

from mmdet.models.builder import HEADS
from .fcn_mask_head import FCNMaskHead


@HEADS.register_module()
class HTCMaskHead(FCNMaskHead):

    def __init__(self, with_conv_res=True, *args, **kwargs):
        super(HTCMaskHead, self).__init__(*args, **kwargs)
        self.with_conv_res = with_conv_res
        if self.with_conv_res:
            self.conv_res = ConvModule(
                self.conv_out_channels,
                self.conv_out_channels,
                1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)

    def forward(self, x, res_feat=None, return_logits=True,
                return_feat=True):
        if res_feat is not None:
            assert self.with_conv_res
            res_feat = self.conv_res(res_feat)
            x = x + res_feat
        for conv in self.convs:
            x = conv(x)
        res_feat = x
        outs = []
        if return_logits:
            x = self.upsample(x)
            if self.upsample_method == 'deconv':
                x = self.relu(x)
            mask_pred = self.conv_logits(x)
            outs.append(mask_pred)
        if return_feat:
            outs.append(res_feat)
        return outs if len(outs) > 1 else outs[0]
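
# --- Illustration (not part of the original file) ---
# A minimal sketch of the return conventions above, as used by the HTC RoI
# head: the default call returns (mask_pred, res_feat); return_logits=False
# returns only the residual feature for the next stage; return_feat=False
# returns only the prediction. Sizes are assumptions for the sketch.
import torch
from mmdet.models.roi_heads.mask_heads import HTCMaskHead

head = HTCMaskHead(num_classes=3, in_channels=256)
feats = torch.rand(2, 256, 14, 14)
mask_pred, res_feat = head(feats)            # both outputs
res_only = head(feats, return_logits=False)  # feature for the next stage
pred_only = head(feats, res_feat=res_only, return_feat=False)
print(mask_pred.shape, res_only.shape, pred_only.shape)
# --- end illustration ---
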
""" def __init__(self, num_classes, num_fcs=3, in_channels=256, fc_channels=256, class_agnostic=False, coarse_pred_each_layer=True, conv_cfg=dict(type='Conv1d'), norm_cfg=None, act_cfg=dict(type='ReLU'), loss_point=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0), init_cfg=dict( type='Normal', std=0.001, override=dict(name='fc_logits'))): super().__init__(init_cfg) self.num_fcs = num_fcs self.in_channels = in_channels self.fc_channels = fc_channels self.num_classes = num_classes self.class_agnostic = class_agnostic self.coarse_pred_each_layer = coarse_pred_each_layer self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.loss_point = build_loss(loss_point) fc_in_channels = in_channels + num_classes self.fcs = nn.ModuleList() for _ in range(num_fcs): fc = ConvModule( fc_in_channels, fc_channels, kernel_size=1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.fcs.append(fc) fc_in_channels = fc_channels fc_in_channels += num_classes if self.coarse_pred_each_layer else 0 out_channels = 1 if self.class_agnostic else self.num_classes self.fc_logits = nn.Conv1d( fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0) def forward(self, fine_grained_feats, coarse_feats): """Classify each point base on fine grained and coarse feats. Args: fine_grained_feats (Tensor): Fine grained feature sampled from FPN, shape (num_rois, in_channels, num_points). coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead, shape (num_rois, num_classes, num_points). Returns: Tensor: Point classification results, shape (num_rois, num_class, num_points). """ x = torch.cat([fine_grained_feats, coarse_feats], dim=1) for fc in self.fcs: x = fc(x) if self.coarse_pred_each_layer: x = torch.cat((x, coarse_feats), dim=1) return self.fc_logits(x) def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks, cfg): """Get training targets of MaskPointHead for all images. Args: rois (Tensor): Region of Interest, shape (num_rois, 5). rel_roi_points: Points coordinates relative to RoI, shape (num_rois, num_points, 2). sampling_results (:obj:`SamplingResult`): Sampling result after sampling and assignment. gt_masks (Tensor) : Ground truth segmentation masks of corresponding boxes, shape (num_rois, height, width). cfg (dict): Training cfg. Returns: Tensor: Point target, shape (num_rois, num_points). 
""" num_imgs = len(sampling_results) rois_list = [] rel_roi_points_list = [] for batch_ind in range(num_imgs): inds = (rois[:, 0] == batch_ind) rois_list.append(rois[inds]) rel_roi_points_list.append(rel_roi_points[inds]) pos_assigned_gt_inds_list = [ res.pos_assigned_gt_inds for res in sampling_results ] cfg_list = [cfg for _ in range(num_imgs)] point_targets = map(self._get_target_single, rois_list, rel_roi_points_list, pos_assigned_gt_inds_list, gt_masks, cfg_list) point_targets = list(point_targets) if len(point_targets) > 0: point_targets = torch.cat(point_targets) return point_targets def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds, gt_masks, cfg): """Get training target of MaskPointHead for each image.""" num_pos = rois.size(0) num_points = cfg.num_points if num_pos > 0: gt_masks_th = ( gt_masks.to_tensor(rois.dtype, rois.device).index_select( 0, pos_assigned_gt_inds)) gt_masks_th = gt_masks_th.unsqueeze(1) rel_img_points = rel_roi_point_to_rel_img_point( rois, rel_roi_points, gt_masks_th) point_targets = point_sample(gt_masks_th, rel_img_points).squeeze(1) else: point_targets = rois.new_zeros((0, num_points)) return point_targets def loss(self, point_pred, point_targets, labels): """Calculate loss for MaskPointHead. Args: point_pred (Tensor): Point predication result, shape (num_rois, num_classes, num_points). point_targets (Tensor): Point targets, shape (num_roi, num_points). labels (Tensor): Class label of corresponding boxes, shape (num_rois, ) Returns: dict[str, Tensor]: a dictionary of point loss components """ loss = dict() if self.class_agnostic: loss_point = self.loss_point(point_pred, point_targets, torch.zeros_like(labels)) else: loss_point = self.loss_point(point_pred, point_targets, labels) loss['loss_point'] = loss_point return loss def get_roi_rel_points_train(self, mask_pred, labels, cfg): """Get ``num_points`` most uncertain points with random points during train. Sample points in [0, 1] x [0, 1] coordinate space based on their uncertainty. The uncertainties are calculated for each point using '_get_uncertainty()' function that takes point's logit prediction as input. Args: mask_pred (Tensor): A tensor of shape (num_rois, num_classes, mask_height, mask_width) for class-specific or class-agnostic prediction. labels (list): The ground truth class for each instance. cfg (dict): Training config of point head. Returns: point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) that contains the coordinates sampled points. """ point_coords = get_uncertain_point_coords_with_randomness( mask_pred, labels, cfg.num_points, cfg.oversample_ratio, cfg.importance_sample_ratio) return point_coords def get_roi_rel_points_test(self, mask_pred, pred_label, cfg): """Get ``num_points`` most uncertain points during test. Args: mask_pred (Tensor): A tensor of shape (num_rois, num_classes, mask_height, mask_width) for class-specific or class-agnostic prediction. pred_label (list): The predication class for each instance. cfg (dict): Testing config of point head. Returns: point_indices (Tensor): A tensor of shape (num_rois, num_points) that contains indices from [0, mask_height x mask_width) of the most uncertain points. point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) that contains [0, 1] x [0, 1] normalized coordinates of the most uncertain points from the [mask_height, mask_width] grid . 
""" num_points = cfg.subdivision_num_points uncertainty_map = get_uncertainty(mask_pred, pred_label) num_rois, _, mask_height, mask_width = uncertainty_map.shape # During ONNX exporting, the type of each elements of 'shape' is # `Tensor(float)`, while it is `float` during PyTorch inference. if isinstance(mask_height, torch.Tensor): h_step = 1.0 / mask_height.float() w_step = 1.0 / mask_width.float() else: h_step = 1.0 / mask_height w_step = 1.0 / mask_width # cast to int to avoid dynamic K for TopK op in ONNX mask_size = int(mask_height * mask_width) uncertainty_map = uncertainty_map.view(num_rois, mask_size) num_points = min(mask_size, num_points) point_indices = uncertainty_map.topk(num_points, dim=1)[1] xs = w_step / 2.0 + (point_indices % mask_width).float() * w_step ys = h_step / 2.0 + (point_indices // mask_width).float() * h_step point_coords = torch.stack([xs, ys], dim=2) return point_indices, point_coords ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/maskiou_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch import torch.nn as nn from mmcv.cnn import Conv2d, Linear, MaxPool2d from mmcv.runner import BaseModule, force_fp32 from torch.nn.modules.utils import _pair from mmdet.models.builder import HEADS, build_loss @HEADS.register_module() class MaskIoUHead(BaseModule): """Mask IoU Head. This head predicts the IoU of predicted masks and corresponding gt masks. """ def __init__(self, num_convs=4, num_fcs=2, roi_feat_size=14, in_channels=256, conv_out_channels=256, fc_out_channels=1024, num_classes=80, loss_iou=dict(type='MSELoss', loss_weight=0.5), init_cfg=[ dict(type='Kaiming', override=dict(name='convs')), dict(type='Caffe2Xavier', override=dict(name='fcs')), dict( type='Normal', std=0.01, override=dict(name='fc_mask_iou')) ]): super(MaskIoUHead, self).__init__(init_cfg) self.in_channels = in_channels self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.num_classes = num_classes self.fp16_enabled = False self.convs = nn.ModuleList() for i in range(num_convs): if i == 0: # concatenation of mask feature and mask prediction in_channels = self.in_channels + 1 else: in_channels = self.conv_out_channels stride = 2 if i == num_convs - 1 else 1 self.convs.append( Conv2d( in_channels, self.conv_out_channels, 3, stride=stride, padding=1)) roi_feat_size = _pair(roi_feat_size) pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2) self.fcs = nn.ModuleList() for i in range(num_fcs): in_channels = ( self.conv_out_channels * pooled_area if i == 0 else self.fc_out_channels) self.fcs.append(Linear(in_channels, self.fc_out_channels)) self.fc_mask_iou = Linear(self.fc_out_channels, self.num_classes) self.relu = nn.ReLU() self.max_pool = MaxPool2d(2, 2) self.loss_iou = build_loss(loss_iou) def forward(self, mask_feat, mask_pred): mask_pred = mask_pred.sigmoid() mask_pred_pooled = self.max_pool(mask_pred.unsqueeze(1)) x = torch.cat((mask_feat, mask_pred_pooled), 1) for conv in self.convs: x = self.relu(conv(x)) x = x.flatten(1) for fc in self.fcs: x = self.relu(fc(x)) mask_iou = self.fc_mask_iou(x) return mask_iou @force_fp32(apply_to=('mask_iou_pred', )) def loss(self, mask_iou_pred, mask_iou_targets): pos_inds = mask_iou_targets > 0 if pos_inds.sum() > 0: loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds], mask_iou_targets[pos_inds]) else: loss_mask_iou = mask_iou_pred.sum() * 0 return 
    """

    def __init__(self,
                 num_convs=4,
                 num_fcs=2,
                 roi_feat_size=14,
                 in_channels=256,
                 conv_out_channels=256,
                 fc_out_channels=1024,
                 num_classes=80,
                 loss_iou=dict(type='MSELoss', loss_weight=0.5),
                 init_cfg=[
                     dict(type='Kaiming', override=dict(name='convs')),
                     dict(type='Caffe2Xavier', override=dict(name='fcs')),
                     dict(
                         type='Normal',
                         std=0.01,
                         override=dict(name='fc_mask_iou'))
                 ]):
        super(MaskIoUHead, self).__init__(init_cfg)
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.num_classes = num_classes
        self.fp16_enabled = False

        self.convs = nn.ModuleList()
        for i in range(num_convs):
            if i == 0:
                # concatenation of mask feature and mask prediction
                in_channels = self.in_channels + 1
            else:
                in_channels = self.conv_out_channels
            stride = 2 if i == num_convs - 1 else 1
            self.convs.append(
                Conv2d(
                    in_channels,
                    self.conv_out_channels,
                    3,
                    stride=stride,
                    padding=1))

        roi_feat_size = _pair(roi_feat_size)
        pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2)
        self.fcs = nn.ModuleList()
        for i in range(num_fcs):
            in_channels = (
                self.conv_out_channels *
                pooled_area if i == 0 else self.fc_out_channels)
            self.fcs.append(Linear(in_channels, self.fc_out_channels))

        self.fc_mask_iou = Linear(self.fc_out_channels, self.num_classes)
        self.relu = nn.ReLU()
        self.max_pool = MaxPool2d(2, 2)
        self.loss_iou = build_loss(loss_iou)

    def forward(self, mask_feat, mask_pred):
        mask_pred = mask_pred.sigmoid()
        mask_pred_pooled = self.max_pool(mask_pred.unsqueeze(1))

        x = torch.cat((mask_feat, mask_pred_pooled), 1)

        for conv in self.convs:
            x = self.relu(conv(x))
        x = x.flatten(1)
        for fc in self.fcs:
            x = self.relu(fc(x))
        mask_iou = self.fc_mask_iou(x)
        return mask_iou

    @force_fp32(apply_to=('mask_iou_pred', ))
    def loss(self, mask_iou_pred, mask_iou_targets):
        pos_inds = mask_iou_targets > 0
        if pos_inds.sum() > 0:
            loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds],
                                          mask_iou_targets[pos_inds])
        else:
            loss_mask_iou = mask_iou_pred.sum() * 0
        return dict(loss_mask_iou=loss_mask_iou)

    @force_fp32(apply_to=('mask_pred', ))
    def get_targets(self, sampling_results, gt_masks, mask_pred, mask_targets,
                    rcnn_train_cfg):
        """Compute target of mask IoU.

        Mask IoU target is the IoU of the predicted mask (inside a bbox) and
        the gt mask of the corresponding instance (the whole instance). The
        intersection area is computed inside the bbox, and the gt mask area
        is computed in two steps: first compute the gt area inside the bbox,
        then divide it by the ratio of the gt area inside the bbox to the gt
        area of the whole instance.

        Args:
            sampling_results (list[:obj:`SamplingResult`]): sampling results.
            gt_masks (BitmapMask | PolygonMask): Gt masks (the whole instance)
                of each image, with the same shape as the input image.
            mask_pred (Tensor): Predicted masks of each positive proposal,
                shape (num_pos, h, w).
            mask_targets (Tensor): Gt mask of each positive proposal,
                binary map of the shape (num_pos, h, w).
            rcnn_train_cfg (dict): Training config for R-CNN part.

        Returns:
            Tensor: mask iou target (length == num positive).
        """
        pos_proposals = [res.pos_bboxes for res in sampling_results]
        pos_assigned_gt_inds = [
            res.pos_assigned_gt_inds for res in sampling_results
        ]

        # compute the area ratio of gt areas inside the proposals and
        # the whole instance
        area_ratios = map(self._get_area_ratio, pos_proposals,
                          pos_assigned_gt_inds, gt_masks)
        area_ratios = torch.cat(list(area_ratios))
        assert mask_targets.size(0) == area_ratios.size(0)

        mask_pred = (mask_pred > rcnn_train_cfg.mask_thr_binary).float()
        mask_pred_areas = mask_pred.sum((-1, -2))

        # mask_pred and mask_targets are binary maps
        overlap_areas = (mask_pred * mask_targets).sum((-1, -2))

        # compute the mask area of the whole instance
        gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7)

        mask_iou_targets = overlap_areas / (
            mask_pred_areas + gt_full_areas - overlap_areas)
        return mask_iou_targets

    def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
        """Compute area ratio of the gt mask inside the proposal and the gt
        mask of the corresponding instance."""
        num_pos = pos_proposals.size(0)
        if num_pos > 0:
            area_ratios = []
            proposals_np = pos_proposals.cpu().numpy()
            pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
            # compute mask areas of gt instances (batch processing for speedup)
            gt_instance_mask_area = gt_masks.areas
            for i in range(num_pos):
                gt_mask = gt_masks[pos_assigned_gt_inds[i]]

                # crop the gt mask inside the proposal
                bbox = proposals_np[i, :].astype(np.int32)
                gt_mask_in_proposal = gt_mask.crop(bbox)

                ratio = gt_mask_in_proposal.areas[0] / (
                    gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)
                area_ratios.append(ratio)
            area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(
                pos_proposals.device)
        else:
            area_ratios = pos_proposals.new_zeros((0, ))
        return area_ratios

    @force_fp32(apply_to=('mask_iou_pred', ))
    def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels):
        """Get the mask scores.

        mask_score = bbox_score * mask_iou
        """
        inds = range(det_labels.size(0))
        mask_scores = mask_iou_pred[inds, det_labels] * det_bboxes[inds, -1]
        mask_scores = mask_scores.cpu().numpy()
        det_labels = det_labels.cpu().numpy()
        return [mask_scores[det_labels == i] for i in range(self.num_classes)]
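
# --- Illustrative sketch (not part of the original file) --------------------
# get_mask_scores() combines detection confidence with the predicted mask
# IoU. With made-up numbers:
#
#   import torch
#   bbox_score = torch.tensor([0.9, 0.6])
#   mask_iou = torch.tensor([0.8, 0.5])
#   bbox_score * mask_iou   # -> tensor([0.7200, 0.3000])
# -----------------------------------------------------------------------------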

================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/scnet_mask_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from .fcn_mask_head import FCNMaskHead


@HEADS.register_module()
class SCNetMaskHead(FCNMaskHead):
    """Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.

    Args:
        conv_to_res (bool, optional): if True, change the conv layers to
            ``SimplifiedBasicBlock``.
    """

    def __init__(self, conv_to_res=True, **kwargs):
        super(SCNetMaskHead, self).__init__(**kwargs)
        self.conv_to_res = conv_to_res
        if conv_to_res:
            assert self.conv_kernel_size == 3
            self.num_res_blocks = self.num_convs // 2
            self.convs = ResLayer(
                SimplifiedBasicBlock,
                self.in_channels,
                self.conv_out_channels,
                self.num_res_blocks,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from .fused_semantic_head import FusedSemanticHead


@HEADS.register_module()
class SCNetSemanticHead(FusedSemanticHead):
    """Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.

    Args:
        conv_to_res (bool, optional): if True, change the conv layers to
            ``SimplifiedBasicBlock``.
    """

    def __init__(self, conv_to_res=True, **kwargs):
        super(SCNetSemanticHead, self).__init__(**kwargs)
        self.conv_to_res = conv_to_res
        if self.conv_to_res:
            num_res_blocks = self.num_convs // 2
            self.convs = ResLayer(
                SimplifiedBasicBlock,
                self.in_channels,
                self.conv_out_channels,
                num_res_blocks,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)
            self.num_convs = num_res_blocks


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_scoring_roi_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from mmdet.core import bbox2roi
from ..builder import HEADS, build_head
from .standard_roi_head import StandardRoIHead


@HEADS.register_module()
class MaskScoringRoIHead(StandardRoIHead):
    """Mask Scoring RoIHead for Mask Scoring RCNN.
    https://arxiv.org/abs/1903.00241
    """

    def __init__(self, mask_iou_head, **kwargs):
        assert mask_iou_head is not None
        super(MaskScoringRoIHead, self).__init__(**kwargs)
        self.mask_iou_head = build_head(mask_iou_head)

    def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
                            img_metas):
        """Run forward function and calculate loss for Mask head in
        training."""
        pos_labels = torch.cat(
            [res.pos_gt_labels for res in sampling_results])
        mask_results = super(MaskScoringRoIHead,
                             self)._mask_forward_train(x, sampling_results,
                                                       bbox_feats, gt_masks,
                                                       img_metas)
        if mask_results['loss_mask'] is None:
            return mask_results

        # mask iou head forward and loss
        pos_mask_pred = mask_results['mask_pred'][
            range(mask_results['mask_pred'].size(0)), pos_labels]
        mask_iou_pred = self.mask_iou_head(mask_results['mask_feats'],
                                           pos_mask_pred)
        pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)),
                                          pos_labels]

        mask_iou_targets = self.mask_iou_head.get_targets(
            sampling_results, gt_masks, pos_mask_pred,
            mask_results['mask_targets'], self.train_cfg)
        loss_mask_iou = self.mask_iou_head.loss(pos_mask_iou_pred,
                                                mask_iou_targets)
        mask_results['loss_mask'].update(loss_mask_iou)

        return mask_results
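
    # --- Illustrative sketch (not part of the original file) ----------------
    # The fancy indexing above, pred[range(N), labels], selects each RoI's
    # class-specific channel. A minimal standalone example:
    #
    #   import torch
    #   pred = torch.arange(6).reshape(2, 3)   # (num_rois, num_classes)
    #   labels = torch.tensor([2, 0])
    #   pred[range(2), labels]                 # -> tensor([2, 3])
    # -------------------------------------------------------------------------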
    def simple_test_mask(self,
                         x,
                         img_metas,
                         det_bboxes,
                         det_labels,
                         rescale=False):
        """Obtain mask prediction without augmentation."""
        # image shapes of images in the batch
        ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
        scale_factors = tuple(meta['scale_factor'] for meta in img_metas)

        num_imgs = len(det_bboxes)
        if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
            num_classes = self.mask_head.num_classes
            segm_results = [[[] for _ in range(num_classes)]
                            for _ in range(num_imgs)]
            mask_scores = [[[] for _ in range(num_classes)]
                           for _ in range(num_imgs)]
        else:
            # if det_bboxes is rescaled to the original image size, we need to
            # rescale it back to the testing scale to obtain RoIs.
            if rescale and not isinstance(scale_factors[0], float):
                scale_factors = [
                    torch.from_numpy(scale_factor).to(det_bboxes[0].device)
                    for scale_factor in scale_factors
                ]
            _bboxes = [
                det_bboxes[i][:, :4] *
                scale_factors[i] if rescale else det_bboxes[i]
                for i in range(num_imgs)
            ]
            mask_rois = bbox2roi(_bboxes)
            mask_results = self._mask_forward(x, mask_rois)
            concat_det_labels = torch.cat(det_labels)
            # get mask scores with mask iou head
            mask_feats = mask_results['mask_feats']
            mask_pred = mask_results['mask_pred']
            mask_iou_pred = self.mask_iou_head(
                mask_feats, mask_pred[range(concat_det_labels.size(0)),
                                      concat_det_labels])
            # split batch mask prediction back to each image
            num_bboxes_per_img = tuple(len(_bbox) for _bbox in _bboxes)
            mask_preds = mask_pred.split(num_bboxes_per_img, 0)
            mask_iou_preds = mask_iou_pred.split(num_bboxes_per_img, 0)

            # apply mask post-processing to each image individually
            segm_results = []
            mask_scores = []
            for i in range(num_imgs):
                if det_bboxes[i].shape[0] == 0:
                    segm_results.append(
                        [[] for _ in range(self.mask_head.num_classes)])
                    mask_scores.append(
                        [[] for _ in range(self.mask_head.num_classes)])
                else:
                    segm_result = self.mask_head.get_seg_masks(
                        mask_preds[i], _bboxes[i], det_labels[i],
                        self.test_cfg, ori_shapes[i], scale_factors[i],
                        rescale)
                    # get mask scores with mask iou head
                    mask_score = self.mask_iou_head.get_mask_scores(
                        mask_iou_preds[i], det_bboxes[i], det_labels[i])
                    segm_results.append(segm_result)
                    mask_scores.append(mask_score)
        return list(zip(segm_results, mask_scores))


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/pisa_roi_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import bbox2roi
from ..builder import HEADS
from ..losses.pisa_loss import carl_loss, isr_p
from .standard_roi_head import StandardRoIHead


@HEADS.register_module()
class PISARoIHead(StandardRoIHead):
    r"""The RoI head for `Prime Sample Attention in Object Detection
    <https://arxiv.org/abs/1904.04821>`_."""

    def forward_train(self,
                      x,
                      img_metas,
                      proposal_list,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None):
        """Forward function for training.

        Args:
            x (list[Tensor]): List of multi-level img features.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For
                details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.
            proposal_list (list[Tensor]): List of region proposals.
            gt_bboxes (list[Tensor]): Each item is the ground-truth boxes of
                one image, in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box
            gt_bboxes_ignore (list[Tensor], optional): Specify which bounding
                boxes can be ignored when computing the loss.
            gt_masks (None | Tensor): True segmentation masks for each box
                used if the architecture supports a segmentation task.
        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        # assign gts and sample proposals
        if self.with_bbox or self.with_mask:
            num_imgs = len(img_metas)
            if gt_bboxes_ignore is None:
                gt_bboxes_ignore = [None for _ in range(num_imgs)]
            sampling_results = []
            neg_label_weights = []
            for i in range(num_imgs):
                assign_result = self.bbox_assigner.assign(
                    proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
                    gt_labels[i])
                sampling_result = self.bbox_sampler.sample(
                    assign_result,
                    proposal_list[i],
                    gt_bboxes[i],
                    gt_labels[i],
                    feats=[lvl_feat[i][None] for lvl_feat in x])
                # neg label weight is obtained by sampling when using ISR-N
                neg_label_weight = None
                if isinstance(sampling_result, tuple):
                    sampling_result, neg_label_weight = sampling_result
                sampling_results.append(sampling_result)
                neg_label_weights.append(neg_label_weight)

        losses = dict()
        # bbox head forward and loss
        if self.with_bbox:
            bbox_results = self._bbox_forward_train(
                x,
                sampling_results,
                gt_bboxes,
                gt_labels,
                img_metas,
                neg_label_weights=neg_label_weights)
            losses.update(bbox_results['loss_bbox'])

        # mask head forward and loss
        if self.with_mask:
            mask_results = self._mask_forward_train(x, sampling_results,
                                                    bbox_results['bbox_feats'],
                                                    gt_masks, img_metas)
            losses.update(mask_results['loss_mask'])

        return losses

    def _bbox_forward(self, x, rois):
        """Box forward function used in both training and testing."""
        # TODO: a more flexible way to decide which feature maps to use
        bbox_feats = self.bbox_roi_extractor(
            x[:self.bbox_roi_extractor.num_inputs], rois)
        if self.with_shared_head:
            bbox_feats = self.shared_head(bbox_feats)
        cls_score, bbox_pred = self.bbox_head(bbox_feats)

        bbox_results = dict(
            cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
        return bbox_results

    def _bbox_forward_train(self,
                            x,
                            sampling_results,
                            gt_bboxes,
                            gt_labels,
                            img_metas,
                            neg_label_weights=None):
        """Run forward function and calculate loss for box head in
        training."""
        rois = bbox2roi([res.bboxes for res in sampling_results])

        bbox_results = self._bbox_forward(x, rois)

        bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
                                                  gt_labels, self.train_cfg)

        # neg_label_weights obtained by sampler is image-wise, mapping back to
        # the corresponding location in label weights
        if neg_label_weights[0] is not None:
            label_weights = bbox_targets[1]
            cur_num_rois = 0
            for i in range(len(sampling_results)):
                num_pos = sampling_results[i].pos_inds.size(0)
                num_neg = sampling_results[i].neg_inds.size(0)
                label_weights[cur_num_rois + num_pos:cur_num_rois + num_pos +
                              num_neg] = neg_label_weights[i]
                cur_num_rois += num_pos + num_neg

        cls_score = bbox_results['cls_score']
        bbox_pred = bbox_results['bbox_pred']

        # Apply ISR-P
        isr_cfg = self.train_cfg.get('isr', None)
        if isr_cfg is not None:
            bbox_targets = isr_p(
                cls_score,
                bbox_pred,
                bbox_targets,
                rois,
                sampling_results,
                self.bbox_head.loss_cls,
                self.bbox_head.bbox_coder,
                **isr_cfg,
                num_class=self.bbox_head.num_classes)
        loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois,
                                        *bbox_targets)

        # Add CARL Loss
        carl_cfg = self.train_cfg.get('carl', None)
        if carl_cfg is not None:
            loss_carl = carl_loss(
                cls_score,
                bbox_targets[0],
                bbox_pred,
                bbox_targets[2],
                self.bbox_head.loss_bbox,
                **carl_cfg,
                num_class=self.bbox_head.num_classes)
            loss_bbox.update(loss_carl)

        bbox_results.update(loss_bbox=loss_bbox)
        return bbox_results

================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/point_rend_roi_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend  # noqa

import os
import warnings

import numpy as np
import torch
import torch.nn.functional as F
from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point

from mmdet.core import bbox2roi, bbox_mapping, merge_aug_masks
from .. import builder
from ..builder import HEADS
from .standard_roi_head import StandardRoIHead


@HEADS.register_module()
class PointRendRoIHead(StandardRoIHead):
    """`PointRend <https://arxiv.org/abs/1912.08193>`_."""

    def __init__(self, point_head, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert self.with_bbox and self.with_mask
        self.init_point_head(point_head)

    def init_point_head(self, point_head):
        """Initialize ``point_head``"""
        self.point_head = builder.build_head(point_head)

    def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
                            img_metas):
        """Run forward function and calculate loss for mask head and point
        head in training."""
        mask_results = super()._mask_forward_train(x, sampling_results,
                                                   bbox_feats, gt_masks,
                                                   img_metas)
        if mask_results['loss_mask'] is not None:
            loss_point = self._mask_point_forward_train(
                x, sampling_results, mask_results['mask_pred'], gt_masks,
                img_metas)
            mask_results['loss_mask'].update(loss_point)

        return mask_results

    def _mask_point_forward_train(self, x, sampling_results, mask_pred,
                                  gt_masks, img_metas):
        """Run forward function and calculate loss for point head in
        training."""
        pos_labels = torch.cat(
            [res.pos_gt_labels for res in sampling_results])
        rel_roi_points = self.point_head.get_roi_rel_points_train(
            mask_pred, pos_labels, cfg=self.train_cfg)
        rois = bbox2roi([res.pos_bboxes for res in sampling_results])
        fine_grained_point_feats = self._get_fine_grained_point_feats(
            x, rois, rel_roi_points, img_metas)
        coarse_point_feats = point_sample(mask_pred, rel_roi_points)
        mask_point_pred = self.point_head(fine_grained_point_feats,
                                          coarse_point_feats)
        mask_point_target = self.point_head.get_targets(
            rois, rel_roi_points, sampling_results, gt_masks, self.train_cfg)
        loss_mask_point = self.point_head.loss(mask_point_pred,
                                               mask_point_target, pos_labels)

        return loss_mask_point

    def _get_fine_grained_point_feats(self, x, rois, rel_roi_points,
                                      img_metas):
        """Sample fine-grained feats from each level feature map and
        concatenate them together.

        Args:
            x (tuple[Tensor]): Feature maps of all scale levels.
            rois (Tensor): shape (num_rois, 5).
            rel_roi_points (Tensor): A tensor of shape (num_rois, num_points,
                2) that contains [0, 1] x [0, 1] normalized coordinates of the
                most uncertain points from the [mask_height, mask_width] grid.
            img_metas (list[dict]): Image meta info.

        Returns:
            Tensor: The fine-grained features for each point,
                has shape (num_rois, feats_channels, num_points).
        """
        num_imgs = len(img_metas)
        fine_grained_feats = []
        for idx in range(self.mask_roi_extractor.num_inputs):
            feats = x[idx]
            spatial_scale = 1. / float(
                self.mask_roi_extractor.featmap_strides[idx])
            point_feats = []
            for batch_ind in range(num_imgs):
                # unravel batch dim
                feat = feats[batch_ind].unsqueeze(0)
                inds = (rois[:, 0].long() == batch_ind)
                if inds.any():
                    rel_img_points = rel_roi_point_to_rel_img_point(
                        rois[inds], rel_roi_points[inds], feat.shape[2:],
                        spatial_scale).unsqueeze(0)
                    point_feat = point_sample(feat, rel_img_points)
                    point_feat = point_feat.squeeze(0).transpose(0, 1)
                    point_feats.append(point_feat)
            fine_grained_feats.append(torch.cat(point_feats, dim=0))
        return torch.cat(fine_grained_feats, dim=1)

    def _mask_point_forward_test(self, x, rois, label_pred, mask_pred,
                                 img_metas):
        """Mask refining process with point head in testing.

        Args:
            x (tuple[Tensor]): Feature maps of all scale levels.
            rois (Tensor): shape (num_rois, 5).
            label_pred (Tensor): The predicted class for each RoI.
            mask_pred (Tensor): The predicted coarse masks of shape
                (num_rois, num_classes, small_size, small_size).
            img_metas (list[dict]): Image meta info.

        Returns:
            Tensor: The refined masks of shape (num_rois, num_classes,
                large_size, large_size).
        """
        refined_mask_pred = mask_pred.clone()
        for subdivision_step in range(self.test_cfg.subdivision_steps):
            refined_mask_pred = F.interpolate(
                refined_mask_pred,
                scale_factor=self.test_cfg.scale_factor,
                mode='bilinear',
                align_corners=False)
            # If `subdivision_num_points` is larger or equal to the
            # resolution of the next step, then we can skip this step
            num_rois, channels, mask_height, mask_width = \
                refined_mask_pred.shape
            if (self.test_cfg.subdivision_num_points >=
                    self.test_cfg.scale_factor**2 * mask_height * mask_width
                    and
                    subdivision_step < self.test_cfg.subdivision_steps - 1):
                continue
            point_indices, rel_roi_points = \
                self.point_head.get_roi_rel_points_test(
                    refined_mask_pred, label_pred, cfg=self.test_cfg)
            fine_grained_point_feats = self._get_fine_grained_point_feats(
                x, rois, rel_roi_points, img_metas)
            coarse_point_feats = point_sample(mask_pred, rel_roi_points)
            mask_point_pred = self.point_head(fine_grained_point_feats,
                                              coarse_point_feats)

            point_indices = point_indices.unsqueeze(1).expand(
                -1, channels, -1)
            refined_mask_pred = refined_mask_pred.reshape(
                num_rois, channels, mask_height * mask_width)
            refined_mask_pred = refined_mask_pred.scatter_(
                2, point_indices, mask_point_pred)
            refined_mask_pred = refined_mask_pred.view(
                num_rois, channels, mask_height, mask_width)

        return refined_mask_pred
    def simple_test_mask(self,
                         x,
                         img_metas,
                         det_bboxes,
                         det_labels,
                         rescale=False):
        """Obtain mask prediction without augmentation."""
        ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
        scale_factors = tuple(meta['scale_factor'] for meta in img_metas)

        if isinstance(scale_factors[0], float):
            warnings.warn(
                'Scale factor in img_metas should be a '
                'ndarray with shape (4,) '
                'arrange as (factor_w, factor_h, factor_w, factor_h), '
                'The scale_factor with float type has been deprecated. ')
            scale_factors = np.array([scale_factors] * 4, dtype=np.float32)

        num_imgs = len(det_bboxes)
        if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
            segm_results = [[[] for _ in range(self.mask_head.num_classes)]
                            for _ in range(num_imgs)]
        else:
            # if det_bboxes is rescaled to the original image size, we need to
            # rescale it back to the testing scale to obtain RoIs.
            _bboxes = [det_bboxes[i][:, :4] for i in range(len(det_bboxes))]
            if rescale:
                scale_factors = [
                    torch.from_numpy(scale_factor).to(det_bboxes[0].device)
                    for scale_factor in scale_factors
                ]
                _bboxes = [
                    _bboxes[i] * scale_factors[i] for i in range(len(_bboxes))
                ]

            mask_rois = bbox2roi(_bboxes)
            mask_results = self._mask_forward(x, mask_rois)
            # split batch mask prediction back to each image
            mask_pred = mask_results['mask_pred']
            num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes]
            mask_preds = mask_pred.split(num_mask_roi_per_img, 0)
            mask_rois = mask_rois.split(num_mask_roi_per_img, 0)

            # apply mask post-processing to each image individually
            segm_results = []
            for i in range(num_imgs):
                if det_bboxes[i].shape[0] == 0:
                    segm_results.append(
                        [[] for _ in range(self.mask_head.num_classes)])
                else:
                    x_i = [xx[[i]] for xx in x]
                    mask_rois_i = mask_rois[i]
                    mask_rois_i[:, 0] = 0  # TODO: remove this hack
                    mask_pred_i = self._mask_point_forward_test(
                        x_i, mask_rois_i, det_labels[i], mask_preds[i],
                        [img_metas])
                    segm_result = self.mask_head.get_seg_masks(
                        mask_pred_i, _bboxes[i], det_labels[i], self.test_cfg,
                        ori_shapes[i], scale_factors[i], rescale)
                    segm_results.append(segm_result)
        return segm_results

    def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
        """Test for mask head with test time augmentation."""
        if det_bboxes.shape[0] == 0:
            segm_result = [[] for _ in range(self.mask_head.num_classes)]
        else:
            aug_masks = []
            for x, img_meta in zip(feats, img_metas):
                img_shape = img_meta[0]['img_shape']
                scale_factor = img_meta[0]['scale_factor']
                flip = img_meta[0]['flip']
                _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
                                       scale_factor, flip)
                mask_rois = bbox2roi([_bboxes])
                mask_results = self._mask_forward(x, mask_rois)
                mask_results['mask_pred'] = self._mask_point_forward_test(
                    x, mask_rois, det_labels, mask_results['mask_pred'],
                    img_meta)
                # convert to numpy array to save memory
                aug_masks.append(
                    mask_results['mask_pred'].sigmoid().cpu().numpy())
            merged_masks = merge_aug_masks(aug_masks, img_metas,
                                           self.test_cfg)

            ori_shape = img_metas[0][0]['ori_shape']
            segm_result = self.mask_head.get_seg_masks(
                merged_masks,
                det_bboxes,
                det_labels,
                self.test_cfg,
                ori_shape,
                scale_factor=1.0,
                rescale=False)
        return segm_result

    def _onnx_get_fine_grained_point_feats(self, x, rois, rel_roi_points):
        """Export the process of sampling fine-grained feats to onnx.

        Args:
            x (tuple[Tensor]): Feature maps of all scale levels.
            rois (Tensor): shape (num_rois, 5).
            rel_roi_points (Tensor): A tensor of shape (num_rois, num_points,
                2) that contains [0, 1] x [0, 1] normalized coordinates of the
                most uncertain points from the [mask_height, mask_width] grid.

        Returns:
            Tensor: The fine-grained features for each point,
                has shape (num_rois, feats_channels, num_points).
        """
        batch_size = x[0].shape[0]
        num_rois = rois.shape[0]
        fine_grained_feats = []
        for idx in range(self.mask_roi_extractor.num_inputs):
            feats = x[idx]
            spatial_scale = 1. / float(
                self.mask_roi_extractor.featmap_strides[idx])
            rel_img_points = rel_roi_point_to_rel_img_point(
                rois, rel_roi_points, feats, spatial_scale)
            channels = feats.shape[1]
            num_points = rel_img_points.shape[1]
            rel_img_points = rel_img_points.reshape(batch_size, -1,
                                                    num_points, 2)
            point_feats = point_sample(feats, rel_img_points)
            point_feats = point_feats.transpose(1, 2).reshape(
                num_rois, channels, num_points)
            fine_grained_feats.append(point_feats)
        return torch.cat(fine_grained_feats, dim=1)

    def _mask_point_onnx_export(self, x, rois, label_pred, mask_pred):
        """Export mask refining process with point head to onnx.

        Args:
            x (tuple[Tensor]): Feature maps of all scale levels.
            rois (Tensor): shape (num_rois, 5).
            label_pred (Tensor): The predicted class for each RoI.
            mask_pred (Tensor): The predicted coarse masks of shape
                (num_rois, num_classes, small_size, small_size).

        Returns:
            Tensor: The refined masks of shape (num_rois, num_classes,
                large_size, large_size).
        """
        refined_mask_pred = mask_pred.clone()
        for subdivision_step in range(self.test_cfg.subdivision_steps):
            refined_mask_pred = F.interpolate(
                refined_mask_pred,
                scale_factor=self.test_cfg.scale_factor,
                mode='bilinear',
                align_corners=False)
            # If `subdivision_num_points` is larger or equal to the
            # resolution of the next step, then we can skip this step
            num_rois, channels, mask_height, mask_width = \
                refined_mask_pred.shape
            if (self.test_cfg.subdivision_num_points >=
                    self.test_cfg.scale_factor**2 * mask_height * mask_width
                    and
                    subdivision_step < self.test_cfg.subdivision_steps - 1):
                continue
            point_indices, rel_roi_points = \
                self.point_head.get_roi_rel_points_test(
                    refined_mask_pred, label_pred, cfg=self.test_cfg)
            fine_grained_point_feats = \
                self._onnx_get_fine_grained_point_feats(
                    x, rois, rel_roi_points)
            coarse_point_feats = point_sample(mask_pred, rel_roi_points)
            mask_point_pred = self.point_head(fine_grained_point_feats,
                                              coarse_point_feats)

            point_indices = point_indices.unsqueeze(1).expand(
                -1, channels, -1)

            refined_mask_pred = refined_mask_pred.reshape(
                num_rois, channels, mask_height * mask_width)

            is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT'
            # avoid ScatterElements op in ONNX for TensorRT
            if is_trt_backend:
                mask_shape = refined_mask_pred.shape
                point_shape = point_indices.shape
                inds_dim0 = torch.arange(point_shape[0]).reshape(
                    point_shape[0], 1, 1).expand_as(point_indices)
                inds_dim1 = torch.arange(point_shape[1]).reshape(
                    1, point_shape[1], 1).expand_as(point_indices)
                inds_1d = inds_dim0.reshape(
                    -1) * mask_shape[1] * mask_shape[2] + inds_dim1.reshape(
                        -1) * mask_shape[2] + point_indices.reshape(-1)
                refined_mask_pred = refined_mask_pred.reshape(-1)
                refined_mask_pred[inds_1d] = mask_point_pred.reshape(-1)
                refined_mask_pred = refined_mask_pred.reshape(*mask_shape)
            else:
                refined_mask_pred = refined_mask_pred.scatter_(
                    2, point_indices, mask_point_pred)

            refined_mask_pred = refined_mask_pred.view(
                num_rois, channels, mask_height, mask_width)

        return refined_mask_pred

    def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels,
                         **kwargs):
        """Export mask branch to onnx which supports batch inference.

        Args:
            x (tuple[Tensor]): Feature maps of all scale levels.
            img_metas (list[dict]): Image meta info.
            det_bboxes (Tensor): Bboxes and corresponding scores,
                has shape [N, num_bboxes, 5].
            det_labels (Tensor): class labels of shape [N, num_bboxes].

        Returns:
            Tensor: The segmentation results of shape [N, num_bboxes,
                image_height, image_width].
""" if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): raise RuntimeError('[ONNX Error] Can not record MaskHead ' 'as it has not been executed this time') batch_size = det_bboxes.size(0) # if det_bboxes is rescaled to the original image size, we need to # rescale it back to the testing scale to obtain RoIs. det_bboxes = det_bboxes[..., :4] batch_index = torch.arange( det_bboxes.size(0), device=det_bboxes.device).float().view( -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1) mask_rois = torch.cat([batch_index, det_bboxes], dim=-1) mask_rois = mask_rois.view(-1, 5) mask_results = self._mask_forward(x, mask_rois) mask_pred = mask_results['mask_pred'] max_shape = img_metas[0]['img_shape_for_onnx'] num_det = det_bboxes.shape[1] det_bboxes = det_bboxes.reshape(-1, 4) det_labels = det_labels.reshape(-1) mask_pred = self._mask_point_onnx_export(x, mask_rois, det_labels, mask_pred) segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes, det_labels, self.test_cfg, max_shape) segm_results = segm_results.reshape(batch_size, num_det, max_shape[0], max_shape[1]) return segm_results ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/roi_extractors/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .base_roi_extractor import BaseRoIExtractor from .generic_roi_extractor import GenericRoIExtractor from .single_level_roi_extractor import SingleRoIExtractor __all__ = ['BaseRoIExtractor', 'SingleRoIExtractor', 'GenericRoIExtractor'] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod import torch import torch.nn as nn from mmcv import ops from mmcv.runner import BaseModule class BaseRoIExtractor(BaseModule, metaclass=ABCMeta): """Base class for RoI extractor. Args: roi_layer (dict): Specify RoI layer type and arguments. out_channels (int): Output channels of RoI layers. featmap_strides (int): Strides of input feature maps. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, roi_layer, out_channels, featmap_strides, init_cfg=None): super(BaseRoIExtractor, self).__init__(init_cfg) self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides) self.out_channels = out_channels self.featmap_strides = featmap_strides self.fp16_enabled = False @property def num_inputs(self): """int: Number of input feature maps.""" return len(self.featmap_strides) def build_roi_layers(self, layer_cfg, featmap_strides): """Build RoI operator to extract feature from each level feature map. Args: layer_cfg (dict): Dictionary to construct and config RoI layer operation. Options are modules under ``mmcv/ops`` such as ``RoIAlign``. featmap_strides (List[int]): The stride of input feature map w.r.t to the original image size, which would be used to scale RoI coordinate (original image coordinate system) to feature coordinate system. Returns: nn.ModuleList: The RoI extractor modules for each level feature map. 
""" cfg = layer_cfg.copy() layer_type = cfg.pop('type') assert hasattr(ops, layer_type) layer_cls = getattr(ops, layer_type) roi_layers = nn.ModuleList( [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides]) return roi_layers def roi_rescale(self, rois, scale_factor): """Scale RoI coordinates by scale factor. Args: rois (torch.Tensor): RoI (Region of Interest), shape (n, 5) scale_factor (float): Scale factor that RoI will be multiplied by. Returns: torch.Tensor: Scaled RoI. """ cx = (rois[:, 1] + rois[:, 3]) * 0.5 cy = (rois[:, 2] + rois[:, 4]) * 0.5 w = rois[:, 3] - rois[:, 1] h = rois[:, 4] - rois[:, 2] new_w = w * scale_factor new_h = h * scale_factor x1 = cx - new_w * 0.5 x2 = cx + new_w * 0.5 y1 = cy - new_h * 0.5 y2 = cy + new_h * 0.5 new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1) return new_rois @abstractmethod def forward(self, feats, rois, roi_scale_factor=None): pass ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from mmcv.cnn.bricks import build_plugin_layer from mmcv.runner import force_fp32 from mmdet.models.builder import ROI_EXTRACTORS from .base_roi_extractor import BaseRoIExtractor @ROI_EXTRACTORS.register_module() class GenericRoIExtractor(BaseRoIExtractor): """Extract RoI features from all level feature maps levels. This is the implementation of `A novel Region of Interest Extraction Layer for Instance Segmentation `_. Args: aggregation (str): The method to aggregate multiple feature maps. Options are 'sum', 'concat'. Default: 'sum'. pre_cfg (dict | None): Specify pre-processing modules. Default: None. post_cfg (dict | None): Specify post-processing modules. Default: None. kwargs (keyword arguments): Arguments that are the same as :class:`BaseRoIExtractor`. 
""" def __init__(self, aggregation='sum', pre_cfg=None, post_cfg=None, **kwargs): super(GenericRoIExtractor, self).__init__(**kwargs) assert aggregation in ['sum', 'concat'] self.aggregation = aggregation self.with_post = post_cfg is not None self.with_pre = pre_cfg is not None # build pre/post processing modules if self.with_post: self.post_module = build_plugin_layer(post_cfg, '_post_module')[1] if self.with_pre: self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1] @force_fp32(apply_to=('feats', ), out_fp16=True) def forward(self, feats, rois, roi_scale_factor=None): """Forward function.""" if len(feats) == 1: return self.roi_layers[0](feats[0], rois) out_size = self.roi_layers[0].output_size num_levels = len(feats) roi_feats = feats[0].new_zeros( rois.size(0), self.out_channels, *out_size) # some times rois is an empty tensor if roi_feats.shape[0] == 0: return roi_feats if roi_scale_factor is not None: rois = self.roi_rescale(rois, roi_scale_factor) # mark the starting channels for concat mode start_channels = 0 for i in range(num_levels): roi_feats_t = self.roi_layers[i](feats[i], rois) end_channels = start_channels + roi_feats_t.size(1) if self.with_pre: # apply pre-processing to a RoI extracted from each layer roi_feats_t = self.pre_module(roi_feats_t) if self.aggregation == 'sum': # and sum them all roi_feats = roi_feats + roi_feats_t else: # and concat them along channel dimension roi_feats[:, start_channels:end_channels] = roi_feats_t # update channels starting position start_channels = end_channels # check if concat channels match at the end if self.aggregation == 'concat': assert start_channels == self.out_channels if self.with_post: # apply post-processing before return the result roi_feats = self.post_module(roi_feats) return roi_feats ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.runner import force_fp32 from mmdet.models.builder import ROI_EXTRACTORS from .base_roi_extractor import BaseRoIExtractor @ROI_EXTRACTORS.register_module() class SingleRoIExtractor(BaseRoIExtractor): """Extract RoI features from a single level feature map. If there are multiple input feature levels, each RoI is mapped to a level according to its scale. The mapping rule is proposed in `FPN `_. Args: roi_layer (dict): Specify RoI layer type and arguments. out_channels (int): Output channels of RoI layers. featmap_strides (List[int]): Strides of input feature maps. finest_scale (int): Scale threshold of mapping to level 0. Default: 56. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, roi_layer, out_channels, featmap_strides, finest_scale=56, init_cfg=None): super(SingleRoIExtractor, self).__init__(roi_layer, out_channels, featmap_strides, init_cfg) self.finest_scale = finest_scale def map_roi_levels(self, rois, num_levels): """Map rois to corresponding feature levels by scales. - scale < finest_scale * 2: level 0 - finest_scale * 2 <= scale < finest_scale * 4: level 1 - finest_scale * 4 <= scale < finest_scale * 8: level 2 - scale >= finest_scale * 8: level 3 Args: rois (Tensor): Input RoIs, shape (k, 5). num_levels (int): Total level number. 
        """
        scale = torch.sqrt(
            (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
        target_lvls = torch.floor(
            torch.log2(scale / self.finest_scale + 1e-6))
        target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
        return target_lvls

    @force_fp32(apply_to=('feats', ), out_fp16=True)
    def forward(self, feats, rois, roi_scale_factor=None):
        """Forward function."""
        out_size = self.roi_layers[0].output_size
        num_levels = len(feats)
        expand_dims = (-1, self.out_channels * out_size[0] * out_size[1])
        if torch.onnx.is_in_onnx_export():
            # Work around to export mask-rcnn to onnx
            roi_feats = rois[:, :1].clone().detach()
            roi_feats = roi_feats.expand(*expand_dims)
            roi_feats = roi_feats.reshape(-1, self.out_channels, *out_size)
            roi_feats = roi_feats * 0
        else:
            roi_feats = feats[0].new_zeros(
                rois.size(0), self.out_channels, *out_size)
        # TODO: remove this when parrots supports
        if torch.__version__ == 'parrots':
            roi_feats.requires_grad = True

        if num_levels == 1:
            if len(rois) == 0:
                return roi_feats
            return self.roi_layers[0](feats[0], rois)

        target_lvls = self.map_roi_levels(rois, num_levels)

        if roi_scale_factor is not None:
            rois = self.roi_rescale(rois, roi_scale_factor)

        for i in range(num_levels):
            mask = target_lvls == i
            if torch.onnx.is_in_onnx_export():
                # To keep all roi_align nodes exported to onnx
                # and skip nonzero op
                mask = mask.float().unsqueeze(-1)
                # select target level rois and reset the rest rois to zero.
                rois_i = rois.clone().detach()
                rois_i = rois_i * mask
                mask_exp = mask.expand(*expand_dims).reshape(roi_feats.shape)
                roi_feats_t = self.roi_layers[i](feats[i], rois_i)
                roi_feats_t = roi_feats_t * mask_exp
                roi_feats = roi_feats + roi_feats_t
                continue
            inds = mask.nonzero(as_tuple=False).squeeze(1)
            if inds.numel() > 0:
                rois_ = rois[inds]
                roi_feats_t = self.roi_layers[i](feats[i], rois_)
                roi_feats[inds] = roi_feats_t
            else:
                # Sometimes some pyramid levels will not be used for RoI
                # feature extraction and this will cause an incomplete
                # computation graph in one GPU, which is different from those
                # in other GPUs and will cause a hanging error.
                # Therefore, we add it to ensure each feature pyramid is
                # included in the computation graph to avoid runtime bugs.
                roi_feats = roi_feats + sum(
                    x.view(-1)[0]
                    for x in self.parameters()) * 0. + feats[i].sum() * 0.

        return roi_feats


================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/scnet_roi_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn.functional as F

from mmdet.core import (bbox2result, bbox2roi, bbox_mapping,
                        merge_aug_bboxes, merge_aug_masks, multiclass_nms)
from ..builder import HEADS, build_head, build_roi_extractor
from ..utils.brick_wrappers import adaptive_avg_pool2d
from .cascade_roi_head import CascadeRoIHead


@HEADS.register_module()
class SCNetRoIHead(CascadeRoIHead):
    """RoIHead for `SCNet <https://arxiv.org/abs/2012.10150>`_.

    Args:
        num_stages (int): number of cascade stages.
        stage_loss_weights (list): loss weight of cascade stages.
        semantic_roi_extractor (dict): config to init semantic roi extractor.
        semantic_head (dict): config to init semantic head.
        feat_relay_head (dict): config to init feature_relay_head.
        glbctx_head (dict): config to init global context head.
""" def __init__(self, num_stages, stage_loss_weights, semantic_roi_extractor=None, semantic_head=None, feat_relay_head=None, glbctx_head=None, **kwargs): super(SCNetRoIHead, self).__init__(num_stages, stage_loss_weights, **kwargs) assert self.with_bbox and self.with_mask assert not self.with_shared_head # shared head is not supported if semantic_head is not None: self.semantic_roi_extractor = build_roi_extractor( semantic_roi_extractor) self.semantic_head = build_head(semantic_head) if feat_relay_head is not None: self.feat_relay_head = build_head(feat_relay_head) if glbctx_head is not None: self.glbctx_head = build_head(glbctx_head) def init_mask_head(self, mask_roi_extractor, mask_head): """Initialize ``mask_head``""" if mask_roi_extractor is not None: self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor) self.mask_head = build_head(mask_head) @property def with_semantic(self): """bool: whether the head has semantic head""" return hasattr(self, 'semantic_head') and self.semantic_head is not None @property def with_feat_relay(self): """bool: whether the head has feature relay head""" return (hasattr(self, 'feat_relay_head') and self.feat_relay_head is not None) @property def with_glbctx(self): """bool: whether the head has global context head""" return hasattr(self, 'glbctx_head') and self.glbctx_head is not None def _fuse_glbctx(self, roi_feats, glbctx_feat, rois): """Fuse global context feats with roi feats.""" assert roi_feats.size(0) == rois.size(0) img_inds = torch.unique(rois[:, 0].cpu(), sorted=True).long() fused_feats = torch.zeros_like(roi_feats) for img_id in img_inds: inds = (rois[:, 0] == img_id.item()) fused_feats[inds] = roi_feats[inds] + glbctx_feat[img_id] return fused_feats def _slice_pos_feats(self, feats, sampling_results): """Get features from pos rois.""" num_rois = [res.bboxes.size(0) for res in sampling_results] num_pos_rois = [res.pos_bboxes.size(0) for res in sampling_results] inds = torch.zeros(sum(num_rois), dtype=torch.bool) start = 0 for i in range(len(num_rois)): start = 0 if i == 0 else start + num_rois[i - 1] stop = start + num_pos_rois[i] inds[start:stop] = 1 sliced_feats = feats[inds] return sliced_feats def _bbox_forward(self, stage, x, rois, semantic_feat=None, glbctx_feat=None): """Box head forward function used in both training and testing.""" bbox_roi_extractor = self.bbox_roi_extractor[stage] bbox_head = self.bbox_head[stage] bbox_feats = bbox_roi_extractor( x[:len(bbox_roi_extractor.featmap_strides)], rois) if self.with_semantic and semantic_feat is not None: bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], rois) if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: bbox_semantic_feat = adaptive_avg_pool2d( bbox_semantic_feat, bbox_feats.shape[-2:]) bbox_feats = bbox_feats + bbox_semantic_feat if self.with_glbctx and glbctx_feat is not None: bbox_feats = self._fuse_glbctx(bbox_feats, glbctx_feat, rois) cls_score, bbox_pred, relayed_feat = bbox_head( bbox_feats, return_shared_feat=True) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, relayed_feat=relayed_feat) return bbox_results def _mask_forward(self, x, rois, semantic_feat=None, glbctx_feat=None, relayed_feat=None): """Mask head forward function used in both training and testing.""" mask_feats = self.mask_roi_extractor( x[:self.mask_roi_extractor.num_inputs], rois) if self.with_semantic and semantic_feat is not None: mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], rois) if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: 
                mask_semantic_feat = F.adaptive_avg_pool2d(
                    mask_semantic_feat, mask_feats.shape[-2:])
            mask_feats = mask_feats + mask_semantic_feat
        if self.with_glbctx and glbctx_feat is not None:
            mask_feats = self._fuse_glbctx(mask_feats, glbctx_feat, rois)
        if self.with_feat_relay and relayed_feat is not None:
            mask_feats = mask_feats + relayed_feat
        mask_pred = self.mask_head(mask_feats)
        mask_results = dict(mask_pred=mask_pred)

        return mask_results

    def _bbox_forward_train(self,
                            stage,
                            x,
                            sampling_results,
                            gt_bboxes,
                            gt_labels,
                            rcnn_train_cfg,
                            semantic_feat=None,
                            glbctx_feat=None):
        """Run forward function and calculate loss for box head in
        training."""
        bbox_head = self.bbox_head[stage]
        rois = bbox2roi([res.bboxes for res in sampling_results])
        bbox_results = self._bbox_forward(
            stage,
            x,
            rois,
            semantic_feat=semantic_feat,
            glbctx_feat=glbctx_feat)

        bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes,
                                             gt_labels, rcnn_train_cfg)
        loss_bbox = bbox_head.loss(bbox_results['cls_score'],
                                   bbox_results['bbox_pred'], rois,
                                   *bbox_targets)

        bbox_results.update(
            loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)
        return bbox_results

    def _mask_forward_train(self,
                            x,
                            sampling_results,
                            gt_masks,
                            rcnn_train_cfg,
                            semantic_feat=None,
                            glbctx_feat=None,
                            relayed_feat=None):
        """Run forward function and calculate loss for mask head in
        training."""
        pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
        mask_results = self._mask_forward(
            x,
            pos_rois,
            semantic_feat=semantic_feat,
            glbctx_feat=glbctx_feat,
            relayed_feat=relayed_feat)

        mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,
                                                  rcnn_train_cfg)
        pos_labels = torch.cat(
            [res.pos_gt_labels for res in sampling_results])
        loss_mask = self.mask_head.loss(mask_results['mask_pred'],
                                        mask_targets, pos_labels)

        mask_results = loss_mask
        return mask_results

    def forward_train(self,
                      x,
                      img_metas,
                      proposal_list,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None,
                      gt_semantic_seg=None):
        """
        Args:
            x (list[Tensor]): list of multi-level img features.
            img_metas (list[dict]): list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For
                details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.
            proposal_list (list[Tensor]): list of region proposals.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            gt_bboxes_ignore (None, list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
            gt_masks (None, Tensor): true segmentation masks for each box
                used if the architecture supports a segmentation task.
            gt_semantic_seg (None, list[Tensor]): semantic segmentation masks
                used if the architecture supports semantic segmentation task.
        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        losses = dict()

        # semantic segmentation branch
        if self.with_semantic:
            semantic_pred, semantic_feat = self.semantic_head(x)
            loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg)
            losses['loss_semantic_seg'] = loss_seg
        else:
            semantic_feat = None

        # global context branch
        if self.with_glbctx:
            mc_pred, glbctx_feat = self.glbctx_head(x)
            loss_glbctx = self.glbctx_head.loss(mc_pred, gt_labels)
            losses['loss_glbctx'] = loss_glbctx
        else:
            glbctx_feat = None

        for i in range(self.num_stages):
            self.current_stage = i
            rcnn_train_cfg = self.train_cfg[i]
            lw = self.stage_loss_weights[i]

            # assign gts and sample proposals
            sampling_results = []
            bbox_assigner = self.bbox_assigner[i]
            bbox_sampler = self.bbox_sampler[i]
            num_imgs = len(img_metas)
            if gt_bboxes_ignore is None:
                gt_bboxes_ignore = [None for _ in range(num_imgs)]

            for j in range(num_imgs):
                assign_result = bbox_assigner.assign(proposal_list[j],
                                                     gt_bboxes[j],
                                                     gt_bboxes_ignore[j],
                                                     gt_labels[j])
                sampling_result = bbox_sampler.sample(
                    assign_result,
                    proposal_list[j],
                    gt_bboxes[j],
                    gt_labels[j],
                    feats=[lvl_feat[j][None] for lvl_feat in x])
                sampling_results.append(sampling_result)

            bbox_results = \
                self._bbox_forward_train(
                    i, x, sampling_results, gt_bboxes, gt_labels,
                    rcnn_train_cfg, semantic_feat, glbctx_feat)
            roi_labels = bbox_results['bbox_targets'][0]

            for name, value in bbox_results['loss_bbox'].items():
                losses[f's{i}.{name}'] = (
                    value * lw if 'loss' in name else value)

            # refine boxes
            if i < self.num_stages - 1:
                pos_is_gts = [res.pos_is_gt for res in sampling_results]
                with torch.no_grad():
                    proposal_list = self.bbox_head[i].refine_bboxes(
                        bbox_results['rois'], roi_labels,
                        bbox_results['bbox_pred'], pos_is_gts, img_metas)

        if self.with_feat_relay:
            relayed_feat = self._slice_pos_feats(
                bbox_results['relayed_feat'], sampling_results)
            relayed_feat = self.feat_relay_head(relayed_feat)
        else:
            relayed_feat = None

        mask_results = self._mask_forward_train(x, sampling_results, gt_masks,
                                                rcnn_train_cfg, semantic_feat,
                                                glbctx_feat, relayed_feat)
        mask_lw = sum(self.stage_loss_weights)
        losses['loss_mask'] = mask_lw * mask_results['loss_mask']

        return losses

    def simple_test(self, x, proposal_list, img_metas, rescale=False):
        """Test without augmentation.

        Args:
            x (tuple[Tensor]): Features from upstream network. Each
                has shape (batch_size, c, h, w).
            proposal_list (list(Tensor)): Proposals from rpn head.
                Each has shape (num_proposals, 5), last dimension
                5 represent (x1, y1, x2, y2, score).
            img_metas (list[dict]): Meta information of images.
            rescale (bool): Whether to rescale the results to
                the original image. Default: True.

        Returns:
            list[list[np.ndarray]] or list[tuple]: When no mask branch,
            it is bbox results of each image and classes with type
            `list[list[np.ndarray]]`. The outer list corresponds to each
            image. The inner list corresponds to each class. When the model
            has a mask branch, it contains bbox results and mask results.
            The outer list corresponds to each image, and the first element
            of the tuple is bbox results, the second element is mask results.
""" if self.with_semantic: _, semantic_feat = self.semantic_head(x) else: semantic_feat = None if self.with_glbctx: mc_pred, glbctx_feat = self.glbctx_head(x) else: glbctx_feat = None num_imgs = len(proposal_list) img_shapes = tuple(meta['img_shape'] for meta in img_metas) ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) # "ms" in variable names means multi-stage ms_scores = [] rcnn_test_cfg = self.test_cfg rois = bbox2roi(proposal_list) if rois.shape[0] == 0: # There is no proposal in the whole batch bbox_results = [[ np.zeros((0, 5), dtype=np.float32) for _ in range(self.bbox_head[-1].num_classes) ]] * num_imgs if self.with_mask: mask_classes = self.mask_head.num_classes segm_results = [[[] for _ in range(mask_classes)] for _ in range(num_imgs)] results = list(zip(bbox_results, segm_results)) else: results = bbox_results return results for i in range(self.num_stages): bbox_head = self.bbox_head[i] bbox_results = self._bbox_forward( i, x, rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat) # split batch bbox prediction back to each image cls_score = bbox_results['cls_score'] bbox_pred = bbox_results['bbox_pred'] num_proposals_per_img = tuple(len(p) for p in proposal_list) rois = rois.split(num_proposals_per_img, 0) cls_score = cls_score.split(num_proposals_per_img, 0) bbox_pred = bbox_pred.split(num_proposals_per_img, 0) ms_scores.append(cls_score) if i < self.num_stages - 1: refine_rois_list = [] for j in range(num_imgs): if rois[j].shape[0] > 0: bbox_label = cls_score[j][:, :-1].argmax(dim=1) refine_rois = bbox_head.regress_by_class( rois[j], bbox_label, bbox_pred[j], img_metas[j]) refine_rois_list.append(refine_rois) rois = torch.cat(refine_rois_list) # average scores of each image by stages cls_score = [ sum([score[i] for score in ms_scores]) / float(len(ms_scores)) for i in range(num_imgs) ] # apply bbox post-processing to each image individually det_bboxes = [] det_labels = [] for i in range(num_imgs): det_bbox, det_label = self.bbox_head[-1].get_bboxes( rois[i], cls_score[i], bbox_pred[i], img_shapes[i], scale_factors[i], rescale=rescale, cfg=rcnn_test_cfg) det_bboxes.append(det_bbox) det_labels.append(det_label) det_bbox_results = [ bbox2result(det_bboxes[i], det_labels[i], self.bbox_head[-1].num_classes) for i in range(num_imgs) ] if self.with_mask: if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): mask_classes = self.mask_head.num_classes det_segm_results = [[[] for _ in range(mask_classes)] for _ in range(num_imgs)] else: if rescale and not isinstance(scale_factors[0], float): scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i] for i in range(num_imgs) ] mask_rois = bbox2roi(_bboxes) # get relay feature on mask_rois bbox_results = self._bbox_forward( -1, x, mask_rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat) relayed_feat = bbox_results['relayed_feat'] relayed_feat = self.feat_relay_head(relayed_feat) mask_results = self._mask_forward( x, mask_rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat, relayed_feat=relayed_feat) mask_pred = mask_results['mask_pred'] # split batch mask prediction back to each image num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes) mask_preds = mask_pred.split(num_bbox_per_img, 0) # apply mask post-processing to each image individually det_segm_results = [] for i in range(num_imgs): if 
det_bboxes[i].shape[0] == 0: det_segm_results.append( [[] for _ in range(self.mask_head.num_classes)]) else: segm_result = self.mask_head.get_seg_masks( mask_preds[i], _bboxes[i], det_labels[i], self.test_cfg, ori_shapes[i], scale_factors[i], rescale) det_segm_results.append(segm_result) # return results if self.with_mask: return list(zip(det_bbox_results, det_segm_results)) else: return det_bbox_results def aug_test(self, img_feats, proposal_list, img_metas, rescale=False): if self.with_semantic: semantic_feats = [ self.semantic_head(feat)[1] for feat in img_feats ] else: semantic_feats = [None] * len(img_metas) if self.with_glbctx: glbctx_feats = [self.glbctx_head(feat)[1] for feat in img_feats] else: glbctx_feats = [None] * len(img_metas) rcnn_test_cfg = self.test_cfg aug_bboxes = [] aug_scores = [] for x, img_meta, semantic_feat, glbctx_feat in zip( img_feats, img_metas, semantic_feats, glbctx_feats): # only one image in the batch img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, scale_factor, flip) # "ms" in variable names means multi-stage ms_scores = [] rois = bbox2roi([proposals]) if rois.shape[0] == 0: # There is no proposal in the single image aug_bboxes.append(rois.new_zeros(0, 4)) aug_scores.append(rois.new_zeros(0, 1)) continue for i in range(self.num_stages): bbox_head = self.bbox_head[i] bbox_results = self._bbox_forward( i, x, rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat) ms_scores.append(bbox_results['cls_score']) if i < self.num_stages - 1: bbox_label = bbox_results['cls_score'].argmax(dim=1) rois = bbox_head.regress_by_class( rois, bbox_label, bbox_results['bbox_pred'], img_meta[0]) cls_score = sum(ms_scores) / float(len(ms_scores)) bboxes, scores = self.bbox_head[-1].get_bboxes( rois, cls_score, bbox_results['bbox_pred'], img_shape, scale_factor, rescale=False, cfg=None) aug_bboxes.append(bboxes) aug_scores.append(scores) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = merge_aug_bboxes( aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img) det_bbox_results = bbox2result(det_bboxes, det_labels, self.bbox_head[-1].num_classes) if self.with_mask: if det_bboxes.shape[0] == 0: det_segm_results = [[] for _ in range(self.mask_head.num_classes)] else: aug_masks = [] for x, img_meta, semantic_feat, glbctx_feat in zip( img_feats, img_metas, semantic_feats, glbctx_feats): img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, scale_factor, flip) mask_rois = bbox2roi([_bboxes]) # get relay feature on mask_rois bbox_results = self._bbox_forward( -1, x, mask_rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat) relayed_feat = bbox_results['relayed_feat'] relayed_feat = self.feat_relay_head(relayed_feat) mask_results = self._mask_forward( x, mask_rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat, relayed_feat=relayed_feat) mask_pred = mask_results['mask_pred'] aug_masks.append(mask_pred.sigmoid().cpu().numpy()) merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg) ori_shape = img_metas[0][0]['ori_shape'] det_segm_results = self.mask_head.get_seg_masks( merged_masks, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor=1.0, 
                    rescale=False)
            return [(det_bbox_results, det_segm_results)]
        else:
            return [det_bbox_results]

================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/shared_heads/__init__.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from .res_layer import ResLayer

__all__ = ['ResLayer']

================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/shared_heads/res_layer.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16

from mmdet.models.backbones import ResNet
from mmdet.models.builder import SHARED_HEADS
from mmdet.models.utils import ResLayer as _ResLayer


@SHARED_HEADS.register_module()
class ResLayer(BaseModule):

    def __init__(self,
                 depth,
                 stage=3,
                 stride=2,
                 dilation=1,
                 style='pytorch',
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=True,
                 with_cp=False,
                 dcn=None,
                 pretrained=None,
                 init_cfg=None):
        super(ResLayer, self).__init__(init_cfg)
        self.norm_eval = norm_eval
        self.norm_cfg = norm_cfg
        self.stage = stage
        self.fp16_enabled = False
        block, stage_blocks = ResNet.arch_settings[depth]
        stage_block = stage_blocks[stage]
        planes = 64 * 2**stage
        inplanes = 64 * 2**(stage - 1) * block.expansion

        res_layer = _ResLayer(
            block,
            inplanes,
            planes,
            stage_block,
            stride=stride,
            dilation=dilation,
            style=style,
            with_cp=with_cp,
            norm_cfg=self.norm_cfg,
            dcn=dcn)
        self.add_module(f'layer{stage + 1}', res_layer)

        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            if init_cfg is None:
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
        else:
            raise TypeError('pretrained must be a str or None')

    @auto_fp16()
    def forward(self, x):
        res_layer = getattr(self, f'layer{self.stage + 1}')
        out = res_layer(x)
        return out

    def train(self, mode=True):
        super(ResLayer, self).train(mode)
        if self.norm_eval:
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()

================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/sparse_roi_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch

from mmdet.core import bbox2result, bbox2roi, bbox_xyxy_to_cxcywh
from mmdet.core.bbox.samplers import PseudoSampler
from ..builder import HEADS
from .cascade_roi_head import CascadeRoIHead


@HEADS.register_module()
class SparseRoIHead(CascadeRoIHead):
    r"""The RoIHead for `Sparse R-CNN: End-to-End Object Detection with
    Learnable Proposals` and `Instances as Queries`.

    Args:
        num_stages (int): Number of stages in the whole iterative process.
            Defaults to 6.
        stage_loss_weights (Tuple[float]): The loss weight of each stage.
            By default all stages have the same weight 1.
        bbox_roi_extractor (dict): Config of box roi extractor.
        mask_roi_extractor (dict): Config of mask roi extractor.
        bbox_head (dict): Config of box head.
        mask_head (dict): Config of mask head.
        train_cfg (dict, optional): Configuration information in train
            stage. Defaults to None.
        test_cfg (dict, optional): Configuration information in test
            stage. Defaults to None.
pretrained (str, optional): model pretrained path. Default: None init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, num_stages=6, stage_loss_weights=(1, 1, 1, 1, 1, 1), proposal_feature_channel=256, bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict( type='RoIAlign', output_size=7, sampling_ratio=2), out_channels=256, featmap_strides=[4, 8, 16, 32]), mask_roi_extractor=None, bbox_head=dict( type='DIIHead', num_classes=80, num_fcs=2, num_heads=8, num_cls_fcs=1, num_reg_fcs=3, feedforward_channels=2048, hidden_channels=256, dropout=0.0, roi_feat_size=7, ffn_act_cfg=dict(type='ReLU', inplace=True)), mask_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): assert bbox_roi_extractor is not None assert bbox_head is not None assert len(stage_loss_weights) == num_stages self.num_stages = num_stages self.stage_loss_weights = stage_loss_weights self.proposal_feature_channel = proposal_feature_channel super(SparseRoIHead, self).__init__( num_stages, stage_loss_weights, bbox_roi_extractor=bbox_roi_extractor, mask_roi_extractor=mask_roi_extractor, bbox_head=bbox_head, mask_head=mask_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg) # train_cfg would be None when run the test.py if train_cfg is not None: for stage in range(num_stages): assert isinstance(self.bbox_sampler[stage], PseudoSampler), \ 'Sparse R-CNN and QueryInst only support `PseudoSampler`' def _bbox_forward(self, stage, x, rois, object_feats, img_metas): """Box head forward function used in both training and testing. Returns all regression, classification results and a intermediate feature. Args: stage (int): The index of current stage in iterative process. x (List[Tensor]): List of FPN features rois (Tensor): Rois in total batch. With shape (num_proposal, 5). the last dimension 5 represents (img_index, x1, y1, x2, y2). object_feats (Tensor): The object feature extracted from the previous stage. img_metas (dict): meta information of images. Returns: dict[str, Tensor]: a dictionary of bbox head outputs, Containing the following results: - cls_score (Tensor): The score of each class, has shape (batch_size, num_proposals, num_classes) when use focal loss or (batch_size, num_proposals, num_classes+1) otherwise. - decode_bbox_pred (Tensor): The regression results with shape (batch_size, num_proposal, 4). The last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - object_feats (Tensor): The object feature extracted from current stage - detach_cls_score_list (list[Tensor]): The detached classification results, length is batch_size, and each tensor has shape (num_proposal, num_classes). - detach_proposal_list (list[tensor]): The detached regression results, length is batch_size, and each tensor has shape (num_proposal, 4). The last dimension 4 represents [tl_x, tl_y, br_x, br_y]. 
""" num_imgs = len(img_metas) bbox_roi_extractor = self.bbox_roi_extractor[stage] bbox_head = self.bbox_head[stage] bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], rois) cls_score, bbox_pred, object_feats, attn_feats = bbox_head( bbox_feats, object_feats) proposal_list = self.bbox_head[stage].refine_bboxes( rois, rois.new_zeros(len(rois)), # dummy arg bbox_pred.view(-1, bbox_pred.size(-1)), [rois.new_zeros(object_feats.size(1)) for _ in range(num_imgs)], img_metas) bbox_results = dict( cls_score=cls_score, decode_bbox_pred=torch.cat(proposal_list), object_feats=object_feats, attn_feats=attn_feats, # detach then use it in label assign detach_cls_score_list=[ cls_score[i].detach() for i in range(num_imgs) ], detach_proposal_list=[item.detach() for item in proposal_list]) return bbox_results def _mask_forward(self, stage, x, rois, attn_feats): """Mask head forward function used in both training and testing.""" mask_roi_extractor = self.mask_roi_extractor[stage] mask_head = self.mask_head[stage] mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], rois) # do not support caffe_c4 model anymore mask_pred = mask_head(mask_feats, attn_feats) mask_results = dict(mask_pred=mask_pred) return mask_results def _mask_forward_train(self, stage, x, attn_feats, sampling_results, gt_masks, rcnn_train_cfg): """Run forward function and calculate loss for mask head in training.""" pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) attn_feats = torch.cat([ feats[res.pos_inds] for (feats, res) in zip(attn_feats, sampling_results) ]) mask_results = self._mask_forward(stage, x, pos_rois, attn_feats) mask_targets = self.mask_head[stage].get_targets( sampling_results, gt_masks, rcnn_train_cfg) pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'], mask_targets, pos_labels) mask_results.update(loss_mask) return mask_results def forward_train(self, x, proposal_boxes, proposal_features, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None, imgs_whwh=None, gt_masks=None): """Forward function in training stage. Args: x (list[Tensor]): list of multi-level img features. proposals (Tensor): Decoded proposal bboxes, has shape (batch_size, num_proposals, 4) proposal_features (Tensor): Expanded proposal features, has shape (batch_size, num_proposals, proposal_feature_channel) img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. imgs_whwh (Tensor): Tensor with shape (batch_size, 4), the dimension means [img_width,img_height, img_width, img_height]. gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. Returns: dict[str, Tensor]: a dictionary of loss components of all stage. 
""" num_imgs = len(img_metas) num_proposals = proposal_boxes.size(1) imgs_whwh = imgs_whwh.repeat(1, num_proposals, 1) all_stage_bbox_results = [] proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))] object_feats = proposal_features all_stage_loss = {} for stage in range(self.num_stages): rois = bbox2roi(proposal_list) bbox_results = self._bbox_forward(stage, x, rois, object_feats, img_metas) all_stage_bbox_results.append(bbox_results) if gt_bboxes_ignore is None: # TODO support ignore gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] cls_pred_list = bbox_results['detach_cls_score_list'] proposal_list = bbox_results['detach_proposal_list'] for i in range(num_imgs): normalize_bbox_ccwh = bbox_xyxy_to_cxcywh(proposal_list[i] / imgs_whwh[i]) assign_result = self.bbox_assigner[stage].assign( normalize_bbox_ccwh, cls_pred_list[i], gt_bboxes[i], gt_labels[i], img_metas[i]) sampling_result = self.bbox_sampler[stage].sample( assign_result, proposal_list[i], gt_bboxes[i]) sampling_results.append(sampling_result) bbox_targets = self.bbox_head[stage].get_targets( sampling_results, gt_bboxes, gt_labels, self.train_cfg[stage], True) cls_score = bbox_results['cls_score'] decode_bbox_pred = bbox_results['decode_bbox_pred'] single_stage_loss = self.bbox_head[stage].loss( cls_score.view(-1, cls_score.size(-1)), decode_bbox_pred.view(-1, 4), *bbox_targets, imgs_whwh=imgs_whwh) if self.with_mask: mask_results = self._mask_forward_train( stage, x, bbox_results['attn_feats'], sampling_results, gt_masks, self.train_cfg[stage]) single_stage_loss['loss_mask'] = mask_results['loss_mask'] for key, value in single_stage_loss.items(): all_stage_loss[f'stage{stage}_{key}'] = value * \ self.stage_loss_weights[stage] object_feats = bbox_results['object_feats'] return all_stage_loss def simple_test(self, x, proposal_boxes, proposal_features, img_metas, imgs_whwh, rescale=False): """Test without augmentation. Args: x (list[Tensor]): list of multi-level img features. proposal_boxes (Tensor): Decoded proposal bboxes, has shape (batch_size, num_proposals, 4) proposal_features (Tensor): Expanded proposal features, has shape (batch_size, num_proposals, proposal_feature_channel) img_metas (dict): meta information of images. imgs_whwh (Tensor): Tensor with shape (batch_size, 4), the dimension means [img_width,img_height, img_width, img_height]. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[list[np.ndarray]] or list[tuple]: When no mask branch, it is bbox results of each image and classes with type `list[list[np.ndarray]]`. The outer list corresponds to each image. The inner list corresponds to each class. When the model has a mask branch, it is a list[tuple] that contains bbox results and mask results. The outer list corresponds to each image, and first element of tuple is bbox results, second element is mask results. """ assert self.with_bbox, 'Bbox head must be implemented.' 
# Decode initial proposals num_imgs = len(img_metas) proposal_list = [proposal_boxes[i] for i in range(num_imgs)] ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) object_feats = proposal_features if all([proposal.shape[0] == 0 for proposal in proposal_list]): # There is no proposal in the whole batch bbox_results = [[ np.zeros((0, 5), dtype=np.float32) for i in range(self.bbox_head[-1].num_classes) ]] * num_imgs return bbox_results for stage in range(self.num_stages): rois = bbox2roi(proposal_list) bbox_results = self._bbox_forward(stage, x, rois, object_feats, img_metas) object_feats = bbox_results['object_feats'] cls_score = bbox_results['cls_score'] proposal_list = bbox_results['detach_proposal_list'] if self.with_mask: rois = bbox2roi(proposal_list) mask_results = self._mask_forward(stage, x, rois, bbox_results['attn_feats']) mask_results['mask_pred'] = mask_results['mask_pred'].reshape( num_imgs, -1, *mask_results['mask_pred'].size()[1:]) num_classes = self.bbox_head[-1].num_classes det_bboxes = [] det_labels = [] if self.bbox_head[-1].loss_cls.use_sigmoid: cls_score = cls_score.sigmoid() else: cls_score = cls_score.softmax(-1)[..., :-1] for img_id in range(num_imgs): cls_score_per_img = cls_score[img_id] scores_per_img, topk_indices = cls_score_per_img.flatten( 0, 1).topk( self.test_cfg.max_per_img, sorted=False) labels_per_img = topk_indices % num_classes bbox_pred_per_img = proposal_list[img_id][topk_indices // num_classes] if rescale: scale_factor = img_metas[img_id]['scale_factor'] bbox_pred_per_img /= bbox_pred_per_img.new_tensor(scale_factor) det_bboxes.append( torch.cat([bbox_pred_per_img, scores_per_img[:, None]], dim=1)) det_labels.append(labels_per_img) bbox_results = [ bbox2result(det_bboxes[i], det_labels[i], num_classes) for i in range(num_imgs) ] if self.with_mask: if rescale and not isinstance(scale_factors[0], float): scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i][:, :4] for i in range(len(det_bboxes)) ] segm_results = [] mask_pred = mask_results['mask_pred'] for img_id in range(num_imgs): mask_pred_per_img = mask_pred[img_id].flatten(0, 1)[topk_indices] mask_pred_per_img = mask_pred_per_img[:, None, ...].repeat( 1, num_classes, 1, 1) segm_result = self.mask_head[-1].get_seg_masks( mask_pred_per_img, _bboxes[img_id], det_labels[img_id], self.test_cfg, ori_shapes[img_id], scale_factors[img_id], rescale) segm_results.append(segm_result) if self.with_mask: results = list(zip(bbox_results, segm_results)) else: results = bbox_results return results def aug_test(self, features, proposal_list, img_metas, rescale=False): raise NotImplementedError( 'Sparse R-CNN and QueryInst does not support `aug_test`') def forward_dummy(self, x, proposal_boxes, proposal_features, img_metas): """Dummy forward function when do the flops computing.""" all_stage_bbox_results = [] proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))] object_feats = proposal_features if self.with_bbox: for stage in range(self.num_stages): rois = bbox2roi(proposal_list) bbox_results = self._bbox_forward(stage, x, rois, object_feats, img_metas) all_stage_bbox_results.append((bbox_results, )) proposal_list = bbox_results['detach_proposal_list'] object_feats = bbox_results['object_feats'] if self.with_mask: rois = bbox2roi(proposal_list) mask_results = self._mask_forward( stage, x, 
rois, bbox_results['attn_feats']) all_stage_bbox_results[-1] += (mask_results, ) return all_stage_bbox_results ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/standard_roi_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler from ..builder import HEADS, build_head, build_roi_extractor from .base_roi_head import BaseRoIHead from .test_mixins import BBoxTestMixin, MaskTestMixin @HEADS.register_module() class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin): """Simplest base roi head including one bbox head and one mask head.""" def init_assigner_sampler(self): """Initialize assigner and sampler.""" self.bbox_assigner = None self.bbox_sampler = None if self.train_cfg: self.bbox_assigner = build_assigner(self.train_cfg.assigner) self.bbox_sampler = build_sampler( self.train_cfg.sampler, context=self) def init_bbox_head(self, bbox_roi_extractor, bbox_head): """Initialize ``bbox_head``""" self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor) self.bbox_head = build_head(bbox_head) def init_mask_head(self, mask_roi_extractor, mask_head): """Initialize ``mask_head``""" if mask_roi_extractor is not None: self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor) self.share_roi_extractor = False else: self.share_roi_extractor = True self.mask_roi_extractor = self.bbox_roi_extractor self.mask_head = build_head(mask_head) def forward_dummy(self, x, proposals): """Dummy forward function.""" # bbox head outs = () rois = bbox2roi([proposals]) if self.with_bbox: bbox_results = self._bbox_forward(x, rois) outs = outs + (bbox_results['cls_score'], bbox_results['bbox_pred']) # mask head if self.with_mask: mask_rois = rois[:100] mask_results = self._mask_forward(x, mask_rois) outs = outs + (mask_results['mask_pred'], ) return outs def forward_train(self, x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, **kwargs): """ Args: x (list[Tensor]): list of multi-level img features. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. proposals (list[Tensors]): list of region proposals. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. 
Returns: dict[str, Tensor]: a dictionary of loss components """ # assign gts and sample proposals if self.with_bbox or self.with_mask: num_imgs = len(img_metas) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] for i in range(num_imgs): assign_result = self.bbox_assigner.assign( proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = self.bbox_sampler.sample( assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=[lvl_feat[i][None] for lvl_feat in x]) sampling_results.append(sampling_result) losses = dict() # bbox head forward and loss if self.with_bbox: bbox_results = self._bbox_forward_train(x, sampling_results, gt_bboxes, gt_labels, img_metas) losses.update(bbox_results['loss_bbox']) # mask head forward and loss if self.with_mask: mask_results = self._mask_forward_train(x, sampling_results, bbox_results['bbox_feats'], gt_masks, img_metas) losses.update(mask_results['loss_mask']) return losses def _bbox_forward(self, x, rois): """Box head forward function used in both training and testing.""" # TODO: a more flexible way to decide which feature maps to use bbox_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) if self.with_shared_head: bbox_feats = self.shared_head(bbox_feats) cls_score, bbox_pred = self.bbox_head(bbox_feats) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) return bbox_results def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, img_metas): """Run forward function and calculate loss for box head in training.""" rois = bbox2roi([res.bboxes for res in sampling_results]) bbox_results = self._bbox_forward(x, rois) bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes, gt_labels, self.train_cfg) loss_bbox = self.bbox_head.loss(bbox_results['cls_score'], bbox_results['bbox_pred'], rois, *bbox_targets) bbox_results.update(loss_bbox=loss_bbox) return bbox_results def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, img_metas): """Run forward function and calculate loss for mask head in training.""" if not self.share_roi_extractor: pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) mask_results = self._mask_forward(x, pos_rois) else: pos_inds = [] device = bbox_feats.device for res in sampling_results: pos_inds.append( torch.ones( res.pos_bboxes.shape[0], device=device, dtype=torch.uint8)) pos_inds.append( torch.zeros( res.neg_bboxes.shape[0], device=device, dtype=torch.uint8)) pos_inds = torch.cat(pos_inds) mask_results = self._mask_forward( x, pos_inds=pos_inds, bbox_feats=bbox_feats) mask_targets = self.mask_head.get_targets(sampling_results, gt_masks, self.train_cfg) pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) loss_mask = self.mask_head.loss(mask_results['mask_pred'], mask_targets, pos_labels) mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets) return mask_results def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None): """Mask head forward function used in both training and testing.""" assert ((rois is not None) ^ (pos_inds is not None and bbox_feats is not None)) if rois is not None: mask_feats = self.mask_roi_extractor( x[:self.mask_roi_extractor.num_inputs], rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) else: assert bbox_feats is not None mask_feats = bbox_feats[pos_inds] mask_pred = self.mask_head(mask_feats) mask_results = dict(mask_pred=mask_pred, 
mask_feats=mask_feats) return mask_results async def async_simple_test(self, x, proposal_list, img_metas, proposals=None, rescale=False): """Async test without augmentation.""" assert self.with_bbox, 'Bbox head must be implemented.' det_bboxes, det_labels = await self.async_test_bboxes( x, img_metas, proposal_list, self.test_cfg, rescale=rescale) bbox_results = bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) if not self.with_mask: return bbox_results else: segm_results = await self.async_test_mask( x, img_metas, det_bboxes, det_labels, rescale=rescale, mask_test_cfg=self.test_cfg.get('mask')) return bbox_results, segm_results def simple_test(self, x, proposal_list, img_metas, proposals=None, rescale=False): """Test without augmentation. Args: x (tuple[Tensor]): Features from upstream network. Each has shape (batch_size, c, h, w). proposal_list (list(Tensor)): Proposals from rpn head. Each has shape (num_proposals, 5), last dimension 5 represent (x1, y1, x2, y2, score). img_metas (list[dict]): Meta information of images. rescale (bool): Whether to rescale the results to the original image. Default: True. Returns: list[list[np.ndarray]] or list[tuple]: When no mask branch, it is bbox results of each image and classes with type `list[list[np.ndarray]]`. The outer list corresponds to each image. The inner list corresponds to each class. When the model has mask branch, it contains bbox results and mask results. The outer list corresponds to each image, and first element of tuple is bbox results, second element is mask results. """ assert self.with_bbox, 'Bbox head must be implemented.' det_bboxes, det_labels = self.simple_test_bboxes( x, img_metas, proposal_list, self.test_cfg, rescale=rescale) bbox_results = [ bbox2result(det_bboxes[i], det_labels[i], self.bbox_head.num_classes) for i in range(len(det_bboxes)) ] if not self.with_mask: return bbox_results else: segm_results = self.simple_test_mask( x, img_metas, det_bboxes, det_labels, rescale=rescale) return list(zip(bbox_results, segm_results)) def aug_test(self, x, proposal_list, img_metas, rescale=False): """Test with augmentations. If rescale is False, then returned bboxes and masks will fit the scale of imgs[0]. """ det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas, proposal_list, self.test_cfg) if rescale: _det_bboxes = det_bboxes else: _det_bboxes = det_bboxes.clone() _det_bboxes[:, :4] *= det_bboxes.new_tensor( img_metas[0][0]['scale_factor']) bbox_results = bbox2result(_det_bboxes, det_labels, self.bbox_head.num_classes) # det_bboxes always keep the original scale if self.with_mask: segm_results = self.aug_test_mask(x, img_metas, det_bboxes, det_labels) return [(bbox_results, segm_results)] else: return [bbox_results] def onnx_export(self, x, proposals, img_metas, rescale=False): """Test without augmentation.""" assert self.with_bbox, 'Bbox head must be implemented.' det_bboxes, det_labels = self.bbox_onnx_export( x, img_metas, proposals, self.test_cfg, rescale=rescale) if not self.with_mask: return det_bboxes, det_labels else: segm_results = self.mask_onnx_export( x, img_metas, det_bboxes, det_labels, rescale=rescale) return det_bboxes, det_labels, segm_results def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwargs): """Export mask branch to onnx which supports batch inference. Args: x (tuple[Tensor]): Feature maps of all scale level. img_metas (list[dict]): Image meta info. det_bboxes (Tensor): Bboxes and corresponding scores. has shape [N, num_bboxes, 5]. 
det_labels (Tensor): class labels of shape [N, num_bboxes]. Returns: Tensor: The segmentation results of shape [N, num_bboxes, image_height, image_width]. """ # image shapes of images in the batch if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): raise RuntimeError('[ONNX Error] Can not record MaskHead ' 'as it has not been executed this time') batch_size = det_bboxes.size(0) # if det_bboxes is rescaled to the original image size, we need to # rescale it back to the testing scale to obtain RoIs. det_bboxes = det_bboxes[..., :4] batch_index = torch.arange( det_bboxes.size(0), device=det_bboxes.device).float().view( -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1) mask_rois = torch.cat([batch_index, det_bboxes], dim=-1) mask_rois = mask_rois.view(-1, 5) mask_results = self._mask_forward(x, mask_rois) mask_pred = mask_results['mask_pred'] max_shape = img_metas[0]['img_shape_for_onnx'] num_det = det_bboxes.shape[1] det_bboxes = det_bboxes.reshape(-1, 4) det_labels = det_labels.reshape(-1) segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes, det_labels, self.test_cfg, max_shape) segm_results = segm_results.reshape(batch_size, num_det, max_shape[0], max_shape[1]) return segm_results def bbox_onnx_export(self, x, img_metas, proposals, rcnn_test_cfg, **kwargs): """Export bbox branch to onnx which supports batch inference. Args: x (tuple[Tensor]): Feature maps of all scale level. img_metas (list[dict]): Image meta info. proposals (Tensor): Region proposals with batch dimension, has shape [N, num_bboxes, 5]. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. Returns: tuple[Tensor, Tensor]: bboxes of shape [N, num_bboxes, 5] and class labels of shape [N, num_bboxes]. """ # get origin input shape to support onnx dynamic input shape assert len( img_metas ) == 1, 'Only support one input image while in exporting to ONNX' img_shapes = img_metas[0]['img_shape_for_onnx'] rois = proposals batch_index = torch.arange( rois.size(0), device=rois.device).float().view(-1, 1, 1).expand( rois.size(0), rois.size(1), 1) rois = torch.cat([batch_index, rois[..., :4]], dim=-1) batch_size = rois.shape[0] num_proposals_per_img = rois.shape[1] # Eliminate the batch dimension rois = rois.view(-1, 5) bbox_results = self._bbox_forward(x, rois) cls_score = bbox_results['cls_score'] bbox_pred = bbox_results['bbox_pred'] # Recover the batch dimension rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1)) cls_score = cls_score.reshape(batch_size, num_proposals_per_img, cls_score.size(-1)) bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, bbox_pred.size(-1)) det_bboxes, det_labels = self.bbox_head.onnx_export( rois, cls_score, bbox_pred, img_shapes, cfg=rcnn_test_cfg) return det_bboxes, det_labels ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/test_mixins.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
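# Usage sketch (an illustration, not part of the original file; it assumes a
# working mmdet 2.x install, and the config values shown are the common
# Faster R-CNN defaults rather than values taken from this repository): the
# ``StandardRoIHead`` defined in the file above is normally built from a
# config dict and can be smoke-tested through ``forward_dummy``.
#
#     import torch
#     from mmdet.models.builder import build_head
#
#     roi_head = build_head(
#         dict(
#             type='StandardRoIHead',
#             bbox_roi_extractor=dict(
#                 type='SingleRoIExtractor',
#                 roi_layer=dict(
#                     type='RoIAlign', output_size=7, sampling_ratio=0),
#                 out_channels=256,
#                 featmap_strides=[4, 8, 16, 32]),
#             bbox_head=dict(
#                 type='Shared2FCBBoxHead',
#                 in_channels=256,
#                 fc_out_channels=1024,
#                 roi_feat_size=7,
#                 num_classes=80,
#                 bbox_coder=dict(
#                     type='DeltaXYWHBBoxCoder',
#                     target_means=[0., 0., 0., 0.],
#                     target_stds=[0.1, 0.1, 0.2, 0.2]),
#                 reg_class_agnostic=False,
#                 loss_cls=dict(
#                     type='CrossEntropyLoss',
#                     use_sigmoid=False,
#                     loss_weight=1.0),
#                 loss_bbox=dict(type='L1Loss', loss_weight=1.0))))
#
#     # Four FPN levels for a single 256x256 image and one dummy proposal.
#     feats = tuple(
#         torch.rand(1, 256, 256 // s, 256 // s) for s in (4, 8, 16, 32))
#     proposals = torch.tensor([[10., 10., 100., 120.]])  # x1, y1, x2, y2
#     cls_score, bbox_pred = roi_head.forward_dummy(feats, proposals)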
import sys import warnings import numpy as np import torch from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms) if sys.version_info >= (3, 7): from mmdet.utils.contextmanagers import completed class BBoxTestMixin: if sys.version_info >= (3, 7): async def async_test_bboxes(self, x, img_metas, proposals, rcnn_test_cfg, rescale=False, **kwargs): """Asynchronized test for box head without augmentation.""" rois = bbox2roi(proposals) roi_feats = self.bbox_roi_extractor( x[:len(self.bbox_roi_extractor.featmap_strides)], rois) if self.with_shared_head: roi_feats = self.shared_head(roi_feats) sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017) async with completed( __name__, 'bbox_head_forward', sleep_interval=sleep_interval): cls_score, bbox_pred = self.bbox_head(roi_feats) img_shape = img_metas[0]['img_shape'] scale_factor = img_metas[0]['scale_factor'] det_bboxes, det_labels = self.bbox_head.get_bboxes( rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=rescale, cfg=rcnn_test_cfg) return det_bboxes, det_labels def simple_test_bboxes(self, x, img_metas, proposals, rcnn_test_cfg, rescale=False): """Test only det bboxes without augmentation. Args: x (tuple[Tensor]): Feature maps of all scale level. img_metas (list[dict]): Image meta info. proposals (List[Tensor]): Region proposals. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. rescale (bool): If True, return boxes in original image space. Default: False. Returns: tuple[list[Tensor], list[Tensor]]: The first list contains the boxes of the corresponding image in a batch, each tensor has the shape (num_boxes, 5) and last dimension 5 represent (tl_x, tl_y, br_x, br_y, score). Each Tensor in the second list is the labels with shape (num_boxes, ). The length of both lists should be equal to batch_size. 
""" rois = bbox2roi(proposals) if rois.shape[0] == 0: batch_size = len(proposals) det_bbox = rois.new_zeros(0, 5) det_label = rois.new_zeros((0, ), dtype=torch.long) if rcnn_test_cfg is None: det_bbox = det_bbox[:, :4] det_label = rois.new_zeros( (0, self.bbox_head.fc_cls.out_features)) # There is no proposal in the whole batch return [det_bbox] * batch_size, [det_label] * batch_size bbox_results = self._bbox_forward(x, rois) img_shapes = tuple(meta['img_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) # split batch bbox prediction back to each image cls_score = bbox_results['cls_score'] bbox_pred = bbox_results['bbox_pred'] num_proposals_per_img = tuple(len(p) for p in proposals) rois = rois.split(num_proposals_per_img, 0) cls_score = cls_score.split(num_proposals_per_img, 0) # some detector with_reg is False, bbox_pred will be None if bbox_pred is not None: # TODO move this to a sabl_roi_head # the bbox prediction of some detectors like SABL is not Tensor if isinstance(bbox_pred, torch.Tensor): bbox_pred = bbox_pred.split(num_proposals_per_img, 0) else: bbox_pred = self.bbox_head.bbox_pred_split( bbox_pred, num_proposals_per_img) else: bbox_pred = (None, ) * len(proposals) # apply bbox post-processing to each image individually det_bboxes = [] det_labels = [] for i in range(len(proposals)): if rois[i].shape[0] == 0: # There is no proposal in the single image det_bbox = rois[i].new_zeros(0, 5) det_label = rois[i].new_zeros((0, ), dtype=torch.long) if rcnn_test_cfg is None: det_bbox = det_bbox[:, :4] det_label = rois[i].new_zeros( (0, self.bbox_head.fc_cls.out_features)) else: det_bbox, det_label = self.bbox_head.get_bboxes( rois[i], cls_score[i], bbox_pred[i], img_shapes[i], scale_factors[i], rescale=rescale, cfg=rcnn_test_cfg) det_bboxes.append(det_bbox) det_labels.append(det_label) return det_bboxes, det_labels def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg): """Test det bboxes with test time augmentation.""" aug_bboxes = [] aug_scores = [] for x, img_meta in zip(feats, img_metas): # only one image in the batch img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = img_meta[0]['flip_direction'] # TODO more flexible proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, scale_factor, flip, flip_direction) rois = bbox2roi([proposals]) bbox_results = self._bbox_forward(x, rois) bboxes, scores = self.bbox_head.get_bboxes( rois, bbox_results['cls_score'], bbox_results['bbox_pred'], img_shape, scale_factor, rescale=False, cfg=None) aug_bboxes.append(bboxes) aug_scores.append(scores) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = merge_aug_bboxes( aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) if merged_bboxes.shape[0] == 0: # There is no proposal in the single image det_bboxes = merged_bboxes.new_zeros(0, 5) det_labels = merged_bboxes.new_zeros((0, ), dtype=torch.long) else: det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img) return det_bboxes, det_labels class MaskTestMixin: if sys.version_info >= (3, 7): async def async_test_mask(self, x, img_metas, det_bboxes, det_labels, rescale=False, mask_test_cfg=None): """Asynchronized test for mask head without augmentation.""" # image shape of the first image in the batch (only one) ori_shape = img_metas[0]['ori_shape'] scale_factor = 
img_metas[0]['scale_factor'] if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head.num_classes)] else: if rescale and not isinstance(scale_factor, (float, torch.Tensor)): scale_factor = det_bboxes.new_tensor(scale_factor) _bboxes = ( det_bboxes[:, :4] * scale_factor if rescale else det_bboxes) mask_rois = bbox2roi([_bboxes]) mask_feats = self.mask_roi_extractor( x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'): sleep_interval = mask_test_cfg['async_sleep_interval'] else: sleep_interval = 0.035 async with completed( __name__, 'mask_head_forward', sleep_interval=sleep_interval): mask_pred = self.mask_head(mask_feats) segm_result = self.mask_head.get_seg_masks( mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape, scale_factor, rescale) return segm_result def simple_test_mask(self, x, img_metas, det_bboxes, det_labels, rescale=False): """Simple test for mask head without augmentation.""" # image shapes of images in the batch ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) if isinstance(scale_factors[0], float): warnings.warn( 'Scale factor in img_metas should be a ' 'ndarray with shape (4,) ' 'arrange as (factor_w, factor_h, factor_w, factor_h), ' 'The scale_factor with float type has been deprecated. ') scale_factors = np.array([scale_factors] * 4, dtype=np.float32) num_imgs = len(det_bboxes) if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): segm_results = [[[] for _ in range(self.mask_head.num_classes)] for _ in range(num_imgs)] else: # if det_bboxes is rescaled to the original image size, we need to # rescale it back to the testing scale to obtain RoIs. 
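            # For example, a 666x400 original tested at 1333x800 has
            # scale_factor of roughly (2.0, 2.0, 2.0, 2.0), stored as
            # (factor_w, factor_h, factor_w, factor_h); multiplying the
            # original-scale boxes by it below restores testing-scale
            # coordinates before RoI extraction on the feature maps.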
if rescale: scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i][:, :4] for i in range(len(det_bboxes)) ] mask_rois = bbox2roi(_bboxes) mask_results = self._mask_forward(x, mask_rois) mask_pred = mask_results['mask_pred'] # split batch mask prediction back to each image num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes] mask_preds = mask_pred.split(num_mask_roi_per_img, 0) # apply mask post-processing to each image individually segm_results = [] for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: segm_results.append( [[] for _ in range(self.mask_head.num_classes)]) else: segm_result = self.mask_head.get_seg_masks( mask_preds[i], _bboxes[i], det_labels[i], self.test_cfg, ori_shapes[i], scale_factors[i], rescale) segm_results.append(segm_result) return segm_results def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels): """Test for mask head with test time augmentation.""" if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head.num_classes)] else: aug_masks = [] for x, img_meta in zip(feats, img_metas): img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = img_meta[0]['flip_direction'] _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, scale_factor, flip, flip_direction) mask_rois = bbox2roi([_bboxes]) mask_results = self._mask_forward(x, mask_rois) # convert to numpy array to save memory aug_masks.append( mask_results['mask_pred'].sigmoid().cpu().numpy()) merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg) ori_shape = img_metas[0][0]['ori_shape'] scale_factor = det_bboxes.new_ones(4) segm_result = self.mask_head.get_seg_masks( merged_masks, det_bboxes, det_labels, self.test_cfg, ori_shape, scale_factor=scale_factor, rescale=False) return segm_result ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/trident_roi_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.ops import batched_nms from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes, multiclass_nms) from mmdet.models.roi_heads.standard_roi_head import StandardRoIHead from ..builder import HEADS @HEADS.register_module() class TridentRoIHead(StandardRoIHead): """Trident roi head. Args: num_branch (int): Number of branches in TridentNet. test_branch_idx (int): In inference, all 3 branches will be used if `test_branch_idx==-1`, otherwise only branch with index `test_branch_idx` will be used. 
""" def __init__(self, num_branch, test_branch_idx, **kwargs): self.num_branch = num_branch self.test_branch_idx = test_branch_idx super(TridentRoIHead, self).__init__(**kwargs) def merge_trident_bboxes(self, trident_det_bboxes, trident_det_labels): """Merge bbox predictions of each branch.""" if trident_det_bboxes.numel() == 0: det_bboxes = trident_det_bboxes.new_zeros((0, 5)) det_labels = trident_det_bboxes.new_zeros((0, ), dtype=torch.long) else: nms_bboxes = trident_det_bboxes[:, :4] nms_scores = trident_det_bboxes[:, 4].contiguous() nms_inds = trident_det_labels nms_cfg = self.test_cfg['nms'] det_bboxes, keep = batched_nms(nms_bboxes, nms_scores, nms_inds, nms_cfg) det_labels = trident_det_labels[keep] if self.test_cfg['max_per_img'] > 0: det_labels = det_labels[:self.test_cfg['max_per_img']] det_bboxes = det_bboxes[:self.test_cfg['max_per_img']] return det_bboxes, det_labels def simple_test(self, x, proposal_list, img_metas, proposals=None, rescale=False): """Test without augmentation as follows: 1. Compute prediction bbox and label per branch. 2. Merge predictions of each branch according to scores of bboxes, i.e., bboxes with higher score are kept to give top-k prediction. """ assert self.with_bbox, 'Bbox head must be implemented.' det_bboxes_list, det_labels_list = self.simple_test_bboxes( x, img_metas, proposal_list, self.test_cfg, rescale=rescale) num_branch = self.num_branch if self.test_branch_idx == -1 else 1 for _ in range(len(det_bboxes_list)): if det_bboxes_list[_].shape[0] == 0: det_bboxes_list[_] = det_bboxes_list[_].new_empty((0, 5)) det_bboxes, det_labels = [], [] for i in range(len(img_metas) // num_branch): det_result = self.merge_trident_bboxes( torch.cat(det_bboxes_list[i * num_branch:(i + 1) * num_branch]), torch.cat(det_labels_list[i * num_branch:(i + 1) * num_branch])) det_bboxes.append(det_result[0]) det_labels.append(det_result[1]) bbox_results = [ bbox2result(det_bboxes[i], det_labels[i], self.bbox_head.num_classes) for i in range(len(det_bboxes)) ] return bbox_results def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg): """Test det bboxes with test time augmentation.""" aug_bboxes = [] aug_scores = [] for x, img_meta in zip(feats, img_metas): # only one image in the batch img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = img_meta[0]['flip_direction'] trident_bboxes, trident_scores = [], [] for branch_idx in range(len(proposal_list)): proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, scale_factor, flip, flip_direction) rois = bbox2roi([proposals]) bbox_results = self._bbox_forward(x, rois) bboxes, scores = self.bbox_head.get_bboxes( rois, bbox_results['cls_score'], bbox_results['bbox_pred'], img_shape, scale_factor, rescale=False, cfg=None) trident_bboxes.append(bboxes) trident_scores.append(scores) aug_bboxes.append(torch.cat(trident_bboxes, 0)) aug_scores.append(torch.cat(trident_scores, 0)) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = merge_aug_bboxes( aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img) return det_bboxes, det_labels ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/seg_heads/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
from .panoptic_fpn_head import PanopticFPNHead  # noqa: F401,F403
from .panoptic_fusion_heads import *  # noqa: F401,F403

================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/seg_heads/base_semantic_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod

import torch.nn.functional as F
from mmcv.runner import BaseModule, force_fp32

from ..builder import build_loss
from ..utils import interpolate_as


class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
    """Base module of Semantic Head.

    Args:
        num_classes (int): the number of classes.
        init_cfg (dict): the initialization config.
        loss_seg (dict): the loss of the semantic head.
    """

    def __init__(self,
                 num_classes,
                 init_cfg=None,
                 loss_seg=dict(
                     type='CrossEntropyLoss',
                     ignore_index=255,
                     loss_weight=1.0)):
        super(BaseSemanticHead, self).__init__(init_cfg)
        self.loss_seg = build_loss(loss_seg)
        self.num_classes = num_classes

    @force_fp32(apply_to=('seg_preds', ))
    def loss(self, seg_preds, gt_semantic_seg):
        """Get the loss of semantic head.

        Args:
            seg_preds (Tensor): The input logits with the shape (N, C, H, W).
            gt_semantic_seg (Tensor): The ground truth of semantic
                segmentation with the shape (N, H, W).

        Returns:
            dict: the loss of semantic head.
        """
        if seg_preds.shape[-2:] != gt_semantic_seg.shape[-2:]:
            seg_preds = interpolate_as(seg_preds, gt_semantic_seg)
        seg_preds = seg_preds.permute((0, 2, 3, 1))

        loss_seg = self.loss_seg(
            seg_preds.reshape(-1, self.num_classes),  # => [NxHxW, C]
            gt_semantic_seg.reshape(-1).long())
        return dict(loss_seg=loss_seg)

    @abstractmethod
    def forward(self, x):
        """Placeholder of forward function.

        Returns:
            dict[str, Tensor]: A dictionary, including features
                and predicted scores. Required keys: 'seg_preds'
                and 'feats'.
        """
        pass

    def forward_train(self, x, gt_semantic_seg):
        output = self.forward(x)
        seg_preds = output['seg_preds']
        return self.loss(seg_preds, gt_semantic_seg)

    def simple_test(self, x, img_metas, rescale=False):
        output = self.forward(x)
        seg_preds = output['seg_preds']
        seg_preds = F.interpolate(
            seg_preds,
            size=img_metas[0]['pad_shape'][:2],
            mode='bilinear',
            align_corners=False)

        if rescale:
            h, w, _ = img_metas[0]['img_shape']
            seg_preds = seg_preds[:, :, :h, :w]

            h, w, _ = img_metas[0]['ori_shape']
            seg_preds = F.interpolate(
                seg_preds, size=(h, w), mode='bilinear', align_corners=False)
        return seg_preds

================================================
FILE: DLTA_AI_app/mmdetection/mmdet/models/seg_heads/panoptic_fpn_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import torch
import torch.nn as nn
from mmcv.runner import ModuleList

from ..builder import HEADS
from ..utils import ConvUpsample
from .base_semantic_head import BaseSemanticHead


@HEADS.register_module()
class PanopticFPNHead(BaseSemanticHead):
    """PanopticFPNHead used in Panoptic FPN.

    In this head, the number of output channels is ``num_stuff_classes
    + 1``, including all stuff classes and one thing class. The stuff
    classes will be reset from ``0`` to ``num_stuff_classes - 1``, the
    thing classes will be merged to the ``num_stuff_classes``-th channel.

    Args:
        num_things_classes (int): Number of thing classes. Default: 80.
        num_stuff_classes (int): Number of stuff classes. Default: 53.
        num_classes (int): Number of classes, including all stuff classes
            and one thing class.
This argument is deprecated, please use ``num_things_classes`` and ``num_stuff_classes``. The module will automatically infer the num_classes by ``num_stuff_classes + 1``. in_channels (int): Number of channels in the input feature map. inner_channels (int): Number of channels in inner features. start_level (int): The start level of the input features used in PanopticFPN. end_level (int): The end level of the used features, the ``end_level``-th layer will not be used. fg_range (tuple): Range of the foreground classes. It starts from ``0`` to ``num_things_classes-1``. Deprecated, please use ``num_things_classes`` directly. bg_range (tuple): Range of the background classes. It starts from ``num_things_classes`` to ``num_things_classes + num_stuff_classes - 1``. Deprecated, please use ``num_stuff_classes`` and ``num_things_classes`` directly. conv_cfg (dict): Dictionary to construct and config conv layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Use ``GN`` by default. init_cfg (dict or list[dict], optional): Initialization config dict. loss_seg (dict): the loss of the semantic head. """ def __init__(self, num_things_classes=80, num_stuff_classes=53, num_classes=None, in_channels=256, inner_channels=128, start_level=0, end_level=4, fg_range=None, bg_range=None, conv_cfg=None, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), init_cfg=None, loss_seg=dict( type='CrossEntropyLoss', ignore_index=-1, loss_weight=1.0)): if num_classes is not None: warnings.warn( '`num_classes` is deprecated now, please set ' '`num_stuff_classes` directly, the `num_classes` will be ' 'set to `num_stuff_classes + 1`') # num_classes = num_stuff_classes + 1 for PanopticFPN. assert num_classes == num_stuff_classes + 1 super(PanopticFPNHead, self).__init__(num_stuff_classes + 1, init_cfg, loss_seg) self.num_things_classes = num_things_classes self.num_stuff_classes = num_stuff_classes if fg_range is not None and bg_range is not None: self.fg_range = fg_range self.bg_range = bg_range self.num_things_classes = fg_range[1] - fg_range[0] + 1 self.num_stuff_classes = bg_range[1] - bg_range[0] + 1 warnings.warn( '`fg_range` and `bg_range` are deprecated now, ' f'please use `num_things_classes`={self.num_things_classes} ' f'and `num_stuff_classes`={self.num_stuff_classes} instead.') # Used feature layers are [start_level, end_level) self.start_level = start_level self.end_level = end_level self.num_stages = end_level - start_level self.inner_channels = inner_channels self.conv_upsample_layers = ModuleList() for i in range(start_level, end_level): self.conv_upsample_layers.append( ConvUpsample( in_channels, inner_channels, num_layers=i if i > 0 else 1, num_upsample=i if i > 0 else 0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, )) self.conv_logits = nn.Conv2d(inner_channels, self.num_classes, 1) def _set_things_to_void(self, gt_semantic_seg): """Merge thing classes to one class. In PanopticFPN, the background labels will be reset from `0` to `self.num_stuff_classes-1`, the foreground labels will be merged to `self.num_stuff_classes`-th channel. 
""" gt_semantic_seg = gt_semantic_seg.int() fg_mask = gt_semantic_seg < self.num_things_classes bg_mask = (gt_semantic_seg >= self.num_things_classes) * ( gt_semantic_seg < self.num_things_classes + self.num_stuff_classes) new_gt_seg = torch.clone(gt_semantic_seg) new_gt_seg = torch.where(bg_mask, gt_semantic_seg - self.num_things_classes, new_gt_seg) new_gt_seg = torch.where(fg_mask, fg_mask.int() * self.num_stuff_classes, new_gt_seg) return new_gt_seg def loss(self, seg_preds, gt_semantic_seg): """The loss of PanopticFPN head. Things classes will be merged to one class in PanopticFPN. """ gt_semantic_seg = self._set_things_to_void(gt_semantic_seg) return super().loss(seg_preds, gt_semantic_seg) def init_weights(self): super().init_weights() nn.init.normal_(self.conv_logits.weight.data, 0, 0.01) self.conv_logits.bias.data.zero_() def forward(self, x): # the number of subnets must be not more than # the length of features. assert self.num_stages <= len(x) feats = [] for i, layer in enumerate(self.conv_upsample_layers): f = layer(x[self.start_level + i]) feats.append(f) feats = torch.sum(torch.stack(feats, dim=0), dim=0) seg_preds = self.conv_logits(feats) out = dict(seg_preds=seg_preds, feats=feats) return out ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/seg_heads/panoptic_fusion_heads/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .base_panoptic_fusion_head import \ BasePanopticFusionHead # noqa: F401,F403 from .heuristic_fusion_head import HeuristicFusionHead # noqa: F401,F403 from .maskformer_fusion_head import MaskFormerFusionHead # noqa: F401,F403 ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/seg_heads/panoptic_fusion_heads/base_panoptic_fusion_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod from mmcv.runner import BaseModule from ...builder import build_loss class BasePanopticFusionHead(BaseModule, metaclass=ABCMeta): """Base class for panoptic heads.""" def __init__(self, num_things_classes=80, num_stuff_classes=53, test_cfg=None, loss_panoptic=None, init_cfg=None, **kwargs): super(BasePanopticFusionHead, self).__init__(init_cfg) self.num_things_classes = num_things_classes self.num_stuff_classes = num_stuff_classes self.num_classes = num_things_classes + num_stuff_classes self.test_cfg = test_cfg if loss_panoptic: self.loss_panoptic = build_loss(loss_panoptic) else: self.loss_panoptic = None @property def with_loss(self): """bool: whether the panoptic head contains loss function.""" return self.loss_panoptic is not None @abstractmethod def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs): """Forward function during training.""" @abstractmethod def simple_test(self, img_metas, det_labels, mask_preds, seg_preds, det_bboxes, cfg=None, **kwargs): """Test without augmentation.""" ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
import torch from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET from mmdet.models.builder import HEADS from .base_panoptic_fusion_head import BasePanopticFusionHead @HEADS.register_module() class HeuristicFusionHead(BasePanopticFusionHead): """Fusion Head with Heuristic method.""" def __init__(self, num_things_classes=80, num_stuff_classes=53, test_cfg=None, init_cfg=None, **kwargs): super(HeuristicFusionHead, self).__init__(num_things_classes, num_stuff_classes, test_cfg, None, init_cfg, **kwargs) def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs): """HeuristicFusionHead has no training loss.""" return dict() def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5): """Lay instance masks to a result map. Args: bboxes: The bboxes results, (K, 4). labels: The labels of bboxes, (K, ). masks: The instance masks, (K, H, W). overlap_thr: Threshold to determine whether two masks overlap. default: 0.5. Returns: Tensor: The result map, (H, W). """ num_insts = bboxes.shape[0] id_map = torch.zeros( masks.shape[-2:], device=bboxes.device, dtype=torch.long) if num_insts == 0: return id_map, labels scores, bboxes = bboxes[:, -1], bboxes[:, :4] # Sort by score to use heuristic fusion order = torch.argsort(-scores) bboxes = bboxes[order] labels = labels[order] segm_masks = masks[order] instance_id = 1 left_labels = [] for idx in range(bboxes.shape[0]): _cls = labels[idx] _mask = segm_masks[idx] instance_id_map = torch.ones_like( _mask, dtype=torch.long) * instance_id area = _mask.sum() if area == 0: continue pasted = id_map > 0 intersect = (_mask * pasted).sum() if (intersect / (area + 1e-5)) > overlap_thr: continue _part = _mask * (~pasted) id_map = torch.where(_part, instance_id_map, id_map) left_labels.append(_cls) instance_id += 1 if len(left_labels) > 0: instance_labels = torch.stack(left_labels) else: instance_labels = bboxes.new_zeros((0, ), dtype=torch.long) assert instance_id == (len(instance_labels) + 1) return id_map, instance_labels def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds, **kwargs): """Fuse the results of instance and semantic segmentations. Args: det_bboxes: The bboxes results, (K, 4). det_labels: The labels of bboxes, (K,). mask_preds: The masks results, (K, H, W). seg_preds: The semantic segmentation results, (K, num_stuff + 1, H, W). Returns: Tensor : The panoptic segmentation result, (H, W). 
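        Example (illustrative, assuming ``INSTANCE_OFFSET = 1000``): the
        second thing instance (``instance_id=2``) of class ``17`` is
        encoded as ``17 + 2 * 1000 = 2017``; the class index can be
        recovered with ``pan_results % INSTANCE_OFFSET`` and the
        instance index with ``pan_results // INSTANCE_OFFSET``.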
""" mask_preds = mask_preds >= self.test_cfg.mask_thr_binary id_map, labels = self._lay_masks(det_bboxes, det_labels, mask_preds, self.test_cfg.mask_overlap) seg_results = seg_preds.argmax(dim=0) seg_results = seg_results + self.num_things_classes pan_results = seg_results instance_id = 1 for idx in range(det_labels.shape[0]): _mask = id_map == (idx + 1) if _mask.sum() == 0: continue _cls = labels[idx] # simply trust detection segment_id = _cls + instance_id * INSTANCE_OFFSET pan_results[_mask] = segment_id instance_id += 1 ids, counts = torch.unique( pan_results % INSTANCE_OFFSET, return_counts=True) stuff_ids = ids[ids >= self.num_things_classes] stuff_counts = counts[ids >= self.num_things_classes] ignore_stuff_ids = stuff_ids[ stuff_counts < self.test_cfg.stuff_area_limit] assert pan_results.ndim == 2 pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape( 1, 1, -1)).any(dim=2)] = self.num_classes return pan_results ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/seg_heads/panoptic_fusion_heads/maskformer_fusion_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn.functional as F from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET from mmdet.core.mask import mask2bbox from mmdet.models.builder import HEADS from .base_panoptic_fusion_head import BasePanopticFusionHead @HEADS.register_module() class MaskFormerFusionHead(BasePanopticFusionHead): def __init__(self, num_things_classes=80, num_stuff_classes=53, test_cfg=None, loss_panoptic=None, init_cfg=None, **kwargs): super().__init__(num_things_classes, num_stuff_classes, test_cfg, loss_panoptic, init_cfg, **kwargs) def forward_train(self, **kwargs): """MaskFormerFusionHead has no training loss.""" return dict() def panoptic_postprocess(self, mask_cls, mask_pred): """Panoptic segmengation inference. Args: mask_cls (Tensor): Classfication outputs of shape (num_queries, cls_out_channels) for a image. Note `cls_out_channels` should includes background. mask_pred (Tensor): Mask outputs of shape (num_queries, h, w) for a image. Returns: Tensor: Panoptic segment result of shape \ (h, w), each element in Tensor means: \ ``segment_id = _cls + instance_id * INSTANCE_OFFSET``. """ object_mask_thr = self.test_cfg.get('object_mask_thr', 0.8) iou_thr = self.test_cfg.get('iou_thr', 0.8) filter_low_score = self.test_cfg.get('filter_low_score', False) scores, labels = F.softmax(mask_cls, dim=-1).max(-1) mask_pred = mask_pred.sigmoid() keep = labels.ne(self.num_classes) & (scores > object_mask_thr) cur_scores = scores[keep] cur_classes = labels[keep] cur_masks = mask_pred[keep] cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks h, w = cur_masks.shape[-2:] panoptic_seg = torch.full((h, w), self.num_classes, dtype=torch.int32, device=cur_masks.device) if cur_masks.shape[0] == 0: # We didn't detect any mask :( pass else: cur_mask_ids = cur_prob_masks.argmax(0) instance_id = 1 for k in range(cur_classes.shape[0]): pred_class = int(cur_classes[k].item()) isthing = pred_class < self.num_things_classes mask = cur_mask_ids == k mask_area = mask.sum().item() original_area = (cur_masks[k] >= 0.5).sum().item() if filter_low_score: mask = mask & (cur_masks[k] >= 0.5) if mask_area > 0 and original_area > 0: if mask_area / original_area < iou_thr: continue if not isthing: # different stuff regions of same class will be # merged here, and stuff share the instance_id 0. 
                        panoptic_seg[mask] = pred_class
                    else:
                        panoptic_seg[mask] = (
                            pred_class + instance_id * INSTANCE_OFFSET)
                        instance_id += 1

        return panoptic_seg

    def semantic_postprocess(self, mask_cls, mask_pred):
        """Semantic segmentation postprocess.

        Args:
            mask_cls (Tensor): Classification outputs of shape
                (num_queries, cls_out_channels) for an image.
                Note `cls_out_channels` should include background.
            mask_pred (Tensor): Mask outputs of shape
                (num_queries, h, w) for an image.

        Returns:
            Tensor: Semantic segment result of shape
                (cls_out_channels, h, w).
        """
        # TODO add semantic segmentation result
        raise NotImplementedError

    def instance_postprocess(self, mask_cls, mask_pred):
        """Instance segmentation postprocess.

        Args:
            mask_cls (Tensor): Classification outputs of shape
                (num_queries, cls_out_channels) for an image.
                Note `cls_out_channels` should include background.
            mask_pred (Tensor): Mask outputs of shape
                (num_queries, h, w) for an image.

        Returns:
            tuple[Tensor]: Instance segmentation results.

            - labels_per_image (Tensor): Predicted labels, shape (n, ).
            - bboxes (Tensor): Bboxes and scores with shape (n, 5) of
              positive region in binary mask, the last column is scores.
            - mask_pred_binary (Tensor): Instance masks of
              shape (n, h, w).
        """
        max_per_image = self.test_cfg.get('max_per_image', 100)
        num_queries = mask_cls.shape[0]
        # shape (num_queries, num_class)
        scores = F.softmax(mask_cls, dim=-1)[:, :-1]
        # shape (num_queries * num_class, )
        labels = torch.arange(self.num_classes, device=mask_cls.device).\
            unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)
        scores_per_image, top_indices = scores.flatten(0, 1).topk(
            max_per_image, sorted=False)
        labels_per_image = labels[top_indices]
        query_indices = top_indices // self.num_classes
        mask_pred = mask_pred[query_indices]

        # extract things
        is_thing = labels_per_image < self.num_things_classes
        scores_per_image = scores_per_image[is_thing]
        labels_per_image = labels_per_image[is_thing]
        mask_pred = mask_pred[is_thing]

        mask_pred_binary = (mask_pred > 0).float()
        mask_scores_per_image = (mask_pred.sigmoid() *
                                 mask_pred_binary).flatten(1).sum(1) / (
                                     mask_pred_binary.flatten(1).sum(1) + 1e-6)
        det_scores = scores_per_image * mask_scores_per_image
        mask_pred_binary = mask_pred_binary.bool()
        bboxes = mask2bbox(mask_pred_binary)
        bboxes = torch.cat([bboxes, det_scores[:, None]], dim=-1)
        return labels_per_image, bboxes, mask_pred_binary

    def simple_test(self,
                    mask_cls_results,
                    mask_pred_results,
                    img_metas,
                    rescale=False,
                    **kwargs):
        """Test segment without test-time augmentation.

        Only the output of the last decoder layer is used.

        Args:
            mask_cls_results (Tensor): Mask classification logits,
                shape (batch_size, num_queries, cls_out_channels).
                Note `cls_out_channels` should include background.
            mask_pred_results (Tensor): Mask logits, shape
                (batch_size, num_queries, h, w).
            img_metas (list[dict]): List of image information.
            rescale (bool, optional): If True, return boxes in
                original image space. Default False.

        Returns:
            list[dict[str, Tensor | tuple[Tensor]]]: Semantic segmentation
                results and panoptic segmentation results for each
                image.

            .. code-block:: none

                [
                    {
                        'pan_results': Tensor, # shape = [h, w]
                        'ins_results': tuple[Tensor],
                        # semantic segmentation results are not supported yet
                        'sem_results': Tensor
                    },
                    ...
                ]
        """
        panoptic_on = self.test_cfg.get('panoptic_on', True)
        semantic_on = self.test_cfg.get('semantic_on', False)
        instance_on = self.test_cfg.get('instance_on', False)
        assert not semantic_on, 'semantic segmentation '\
            'results are not supported yet.'
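        # --- Illustrative note (not part of the original file) ---
        # The three flags above come from `test_cfg`. A hypothetical
        # config enabling panoptic and instance outputs together, with
        # the thresholds read by the postprocess methods above, could be:
        #
        #     test_cfg = dict(
        #         panoptic_on=True,
        #         instance_on=True,
        #         semantic_on=False,  # must stay False for now
        #         max_per_image=100,
        #         object_mask_thr=0.8,
        #         iou_thr=0.8,
        #         filter_low_score=False)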
results = [] for mask_cls_result, mask_pred_result, meta in zip( mask_cls_results, mask_pred_results, img_metas): # remove padding img_height, img_width = meta['img_shape'][:2] mask_pred_result = mask_pred_result[:, :img_height, :img_width] if rescale: # return result in original resolution ori_height, ori_width = meta['ori_shape'][:2] mask_pred_result = F.interpolate( mask_pred_result[:, None], size=(ori_height, ori_width), mode='bilinear', align_corners=False)[:, 0] result = dict() if panoptic_on: pan_results = self.panoptic_postprocess( mask_cls_result, mask_pred_result) result['pan_results'] = pan_results if instance_on: ins_results = self.instance_postprocess( mask_cls_result, mask_pred_result) result['ins_results'] = ins_results if semantic_on: sem_results = self.semantic_postprocess( mask_cls_result, mask_pred_result) result['sem_results'] = sem_results results.append(result) return results ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/utils/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d from .builder import build_linear_layer, build_transformer from .ckpt_convert import pvt_convert from .conv_upsample import ConvUpsample from .csp_layer import CSPLayer from .gaussian_target import gaussian_radius, gen_gaussian_target from .inverted_residual import InvertedResidual from .make_divisible import make_divisible from .misc import interpolate_as, sigmoid_geometric_mean from .normed_predictor import NormedConv2d, NormedLinear from .panoptic_gt_processing import preprocess_panoptic_gt from .point_sample import (get_uncertain_point_coords_with_randomness, get_uncertainty) from .positional_encoding import (LearnedPositionalEncoding, SinePositionalEncoding) from .res_layer import ResLayer, SimplifiedBasicBlock from .se_layer import DyReLU, SELayer from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer, DynamicConv, PatchEmbed, Transformer, nchw_to_nlc, nlc_to_nchw) __all__ = [ 'ResLayer', 'gaussian_radius', 'gen_gaussian_target', 'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer', 'build_transformer', 'build_linear_layer', 'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual', 'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean', 'preprocess_panoptic_gt', 'DyReLU', 'get_uncertain_point_coords_with_randomness', 'get_uncertainty' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/utils/brick_wrappers.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn.bricks.wrappers import NewEmptyTensorOp, obsolete_torch_version if torch.__version__ == 'parrots': TORCH_VERSION = torch.__version__ else: # torch.__version__ could be 1.3.1+cu92, we only need the first two # for comparison TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2]) def adaptive_avg_pool2d(input, output_size): """Handle empty batch dimension to adaptive_avg_pool2d. Args: input (tensor): 4D tensor. output_size (int, tuple[int,int]): the target output size. 
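    Example (illustrative)::

        >>> import torch
        >>> out = adaptive_avg_pool2d(torch.rand(2, 3, 8, 8), 1)
        >>> tuple(out.shape)
        (2, 3, 1, 1)
        >>> # an empty batch is handled instead of raising on old PyTorch
        >>> out = adaptive_avg_pool2d(torch.rand(0, 3, 8, 8), 1)
        >>> tuple(out.shape)
        (0, 3, 1, 1)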
""" if input.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)): if isinstance(output_size, int): output_size = [output_size, output_size] output_size = [*input.shape[:2], *output_size] empty = NewEmptyTensorOp.apply(input, output_size) return empty else: return F.adaptive_avg_pool2d(input, output_size) class AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d): """Handle empty batch dimension to AdaptiveAvgPool2d.""" def forward(self, x): # PyTorch 1.9 does not support empty tensor inference yet if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)): output_size = self.output_size if isinstance(output_size, int): output_size = [output_size, output_size] else: output_size = [ v if v is not None else d for v, d in zip(output_size, x.size()[-2:]) ] output_size = [*x.shape[:2], *output_size] empty = NewEmptyTensorOp.apply(x, output_size) return empty return super().forward(x) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/utils/builder.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.utils import Registry, build_from_cfg TRANSFORMER = Registry('Transformer') LINEAR_LAYERS = Registry('linear layers') def build_transformer(cfg, default_args=None): """Builder for Transformer.""" return build_from_cfg(cfg, TRANSFORMER, default_args) LINEAR_LAYERS.register_module('Linear', module=nn.Linear) def build_linear_layer(cfg, *args, **kwargs): """Build linear layer. Args: cfg (None or dict): The linear layer config, which should contain: - type (str): Layer type. - layer args: Args needed to instantiate an linear layer. args (argument list): Arguments passed to the `__init__` method of the corresponding linear layer. kwargs (keyword arguments): Keyword arguments passed to the `__init__` method of the corresponding linear layer. Returns: nn.Module: Created linear layer. """ if cfg is None: cfg_ = dict(type='Linear') else: if not isinstance(cfg, dict): raise TypeError('cfg must be a dict') if 'type' not in cfg: raise KeyError('the cfg dict must contain the key "type"') cfg_ = cfg.copy() layer_type = cfg_.pop('type') if layer_type not in LINEAR_LAYERS: raise KeyError(f'Unrecognized linear type {layer_type}') else: linear_layer = LINEAR_LAYERS.get(layer_type) layer = linear_layer(*args, **kwargs, **cfg_) return layer ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/utils/ckpt_convert.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. # This script consists of several convert functions which # can modify the weights of model in original repo to be # pre-trained weights. 
from collections import OrderedDict import torch def pvt_convert(ckpt): new_ckpt = OrderedDict() # Process the concat between q linear weights and kv linear weights use_abs_pos_embed = False use_conv_ffn = False for k in ckpt.keys(): if k.startswith('pos_embed'): use_abs_pos_embed = True if k.find('dwconv') >= 0: use_conv_ffn = True for k, v in ckpt.items(): if k.startswith('head'): continue if k.startswith('norm.'): continue if k.startswith('cls_token'): continue if k.startswith('pos_embed'): stage_i = int(k.replace('pos_embed', '')) new_k = k.replace(f'pos_embed{stage_i}', f'layers.{stage_i - 1}.1.0.pos_embed') if stage_i == 4 and v.size(1) == 50: # 1 (cls token) + 7 * 7 new_v = v[:, 1:, :] # remove cls token else: new_v = v elif k.startswith('patch_embed'): stage_i = int(k.split('.')[0].replace('patch_embed', '')) new_k = k.replace(f'patch_embed{stage_i}', f'layers.{stage_i - 1}.0') new_v = v if 'proj.' in new_k: new_k = new_k.replace('proj.', 'projection.') elif k.startswith('block'): stage_i = int(k.split('.')[0].replace('block', '')) layer_i = int(k.split('.')[1]) new_layer_i = layer_i + use_abs_pos_embed new_k = k.replace(f'block{stage_i}.{layer_i}', f'layers.{stage_i - 1}.1.{new_layer_i}') new_v = v if 'attn.q.' in new_k: sub_item_k = k.replace('q.', 'kv.') new_k = new_k.replace('q.', 'attn.in_proj_') new_v = torch.cat([v, ckpt[sub_item_k]], dim=0) elif 'attn.kv.' in new_k: continue elif 'attn.proj.' in new_k: new_k = new_k.replace('proj.', 'attn.out_proj.') elif 'attn.sr.' in new_k: new_k = new_k.replace('sr.', 'sr.') elif 'mlp.' in new_k: string = f'{new_k}-' new_k = new_k.replace('mlp.', 'ffn.layers.') if 'fc1.weight' in new_k or 'fc2.weight' in new_k: new_v = v.reshape((*v.shape, 1, 1)) new_k = new_k.replace('fc1.', '0.') new_k = new_k.replace('dwconv.dwconv.', '1.') if use_conv_ffn: new_k = new_k.replace('fc2.', '4.') else: new_k = new_k.replace('fc2.', '3.') string += f'{new_k} {v.shape}-{new_v.shape}' elif k.startswith('norm'): stage_i = int(k[4]) new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i - 1}.2') new_v = v else: new_k = k new_v = v new_ckpt[new_k] = new_v return new_ckpt def swin_converter(ckpt): new_ckpt = OrderedDict() def correct_unfold_reduction_order(x): out_channel, in_channel = x.shape x = x.reshape(out_channel, 4, in_channel // 4) x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel) return x def correct_unfold_norm_order(x): in_channel = x.shape[0] x = x.reshape(4, in_channel // 4) x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel) return x for k, v in ckpt.items(): if k.startswith('head'): continue elif k.startswith('layers'): new_v = v if 'attn.' in k: new_k = k.replace('attn.', 'attn.w_msa.') elif 'mlp.' in k: if 'mlp.fc1.' in k: new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.') elif 'mlp.fc2.' in k: new_k = k.replace('mlp.fc2.', 'ffn.layers.1.') else: new_k = k.replace('mlp.', 'ffn.') elif 'downsample' in k: new_k = k if 'reduction.' in k: new_v = correct_unfold_reduction_order(v) elif 'norm.' in k: new_v = correct_unfold_norm_order(v) else: new_k = k new_k = new_k.replace('layers', 'stages', 1) elif k.startswith('patch_embed'): new_v = v if 'proj' in k: new_k = k.replace('proj', 'projection') else: new_k = k else: new_v = v new_k = k new_ckpt['backbone.' + new_k] = new_v return new_ckpt ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/utils/conv_upsample.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
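# --- Illustrative sketch (not part of the original file) ---
# The converters in ckpt_convert.py above remap checkpoint keys from the
# original repos to mmdet's naming. Hypothetical usage (the file path is
# a placeholder):
#
#     import torch
#     from mmdet.models.utils.ckpt_convert import swin_converter
#
#     ckpt = torch.load('swin_tiny.pth', map_location='cpu')
#     state_dict = ckpt.get('model', ckpt)  # some releases nest under 'model'
#     converted = swin_converter(state_dict)  # keys gain a 'backbone.'
#     # prefix and mmcv-style attn/ffn names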
import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, ModuleList class ConvUpsample(BaseModule): """ConvUpsample performs 2x upsampling after Conv. There are several `ConvModule` layers. In the first few layers, upsampling will be applied after each layer of convolution. The number of upsampling must be no more than the number of ConvModule layers. Args: in_channels (int): Number of channels in the input feature map. inner_channels (int): Number of channels produced by the convolution. num_layers (int): Number of convolution layers. num_upsample (int | optional): Number of upsampling layer. Must be no more than num_layers. Upsampling will be applied after the first ``num_upsample`` layers of convolution. Default: ``num_layers``. conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: None. init_cfg (dict): Config dict for initialization. Default: None. kwargs (key word augments): Other augments used in ConvModule. """ def __init__(self, in_channels, inner_channels, num_layers=1, num_upsample=None, conv_cfg=None, norm_cfg=None, init_cfg=None, **kwargs): super(ConvUpsample, self).__init__(init_cfg) if num_upsample is None: num_upsample = num_layers assert num_upsample <= num_layers, \ f'num_upsample({num_upsample})must be no more than ' \ f'num_layers({num_layers})' self.num_layers = num_layers self.num_upsample = num_upsample self.conv = ModuleList() for i in range(num_layers): self.conv.append( ConvModule( in_channels, inner_channels, 3, padding=1, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) in_channels = inner_channels def forward(self, x): num_upsample = self.num_upsample for i in range(self.num_layers): x = self.conv[i](x) if num_upsample > 0: num_upsample -= 1 x = F.interpolate( x, scale_factor=2, mode='bilinear', align_corners=False) return x ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/utils/csp_layer.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule from mmcv.runner import BaseModule class DarknetBottleneck(BaseModule): """The basic bottleneck block used in Darknet. Each ResBlock consists of two ConvModules and the input is added to the final output. Each ConvModule is composed of Conv, BN, and LeakyReLU. The first convLayer has filter size of 1x1 and the second one has the filter size of 3x3. Args: in_channels (int): The input channels of this Module. out_channels (int): The output channels of this Module. expansion (int): The kernel size of the convolution. Default: 0.5 add_identity (bool): Whether to add identity to the out. Default: True use_depthwise (bool): Whether to use depthwise separable convolution. Default: False conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='Swish'). 
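    Example (illustrative)::

        >>> import torch
        >>> # hidden channels = int(out_channels * expansion) = 32
        >>> block = DarknetBottleneck(64, 64, expansion=0.5)
        >>> block(torch.rand(1, 64, 32, 32)).shape
        torch.Size([1, 64, 32, 32])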
""" def __init__(self, in_channels, out_channels, expansion=0.5, add_identity=True, use_depthwise=False, conv_cfg=None, norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish'), init_cfg=None): super().__init__(init_cfg) hidden_channels = int(out_channels * expansion) conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule self.conv1 = ConvModule( in_channels, hidden_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.conv2 = conv( hidden_channels, out_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.add_identity = \ add_identity and in_channels == out_channels def forward(self, x): identity = x out = self.conv1(x) out = self.conv2(out) if self.add_identity: return out + identity else: return out class CSPLayer(BaseModule): """Cross Stage Partial Layer. Args: in_channels (int): The input channels of the CSP layer. out_channels (int): The output channels of the CSP layer. expand_ratio (float): Ratio to adjust the number of channels of the hidden layer. Default: 0.5 num_blocks (int): Number of blocks. Default: 1 add_identity (bool): Whether to add identity in blocks. Default: True use_depthwise (bool): Whether to depthwise separable convolution in blocks. Default: False conv_cfg (dict, optional): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN') act_cfg (dict): Config dict for activation layer. Default: dict(type='Swish') """ def __init__(self, in_channels, out_channels, expand_ratio=0.5, num_blocks=1, add_identity=True, use_depthwise=False, conv_cfg=None, norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish'), init_cfg=None): super().__init__(init_cfg) mid_channels = int(out_channels * expand_ratio) self.main_conv = ConvModule( in_channels, mid_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.short_conv = ConvModule( in_channels, mid_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.final_conv = ConvModule( 2 * mid_channels, out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.blocks = nn.Sequential(*[ DarknetBottleneck( mid_channels, mid_channels, 1.0, add_identity, use_depthwise, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) for _ in range(num_blocks) ]) def forward(self, x): x_short = self.short_conv(x) x_main = self.main_conv(x) x_main = self.blocks(x_main) x_final = torch.cat((x_main, x_short), dim=1) return self.final_conv(x_final) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/utils/gaussian_target.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from math import sqrt import torch import torch.nn.functional as F def gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'): """Generate 2D gaussian kernel. Args: radius (int): Radius of gaussian kernel. sigma (int): Sigma of gaussian function. Default: 1. dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32. device (str): Device of gaussian tensor. Default: 'cpu'. Returns: h (Tensor): Gaussian kernel with a ``(2 * radius + 1) * (2 * radius + 1)`` shape. 
""" x = torch.arange( -radius, radius + 1, dtype=dtype, device=device).view(1, -1) y = torch.arange( -radius, radius + 1, dtype=dtype, device=device).view(-1, 1) h = (-(x * x + y * y) / (2 * sigma * sigma)).exp() h[h < torch.finfo(h.dtype).eps * h.max()] = 0 return h def gen_gaussian_target(heatmap, center, radius, k=1): """Generate 2D gaussian heatmap. Args: heatmap (Tensor): Input heatmap, the gaussian kernel will cover on it and maintain the max value. center (list[int]): Coord of gaussian kernel's center. radius (int): Radius of gaussian kernel. k (int): Coefficient of gaussian kernel. Default: 1. Returns: out_heatmap (Tensor): Updated heatmap covered by gaussian kernel. """ diameter = 2 * radius + 1 gaussian_kernel = gaussian2D( radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device) x, y = center height, width = heatmap.shape[:2] left, right = min(x, radius), min(width - x, radius + 1) top, bottom = min(y, radius), min(height - y, radius + 1) masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] masked_gaussian = gaussian_kernel[radius - top:radius + bottom, radius - left:radius + right] out_heatmap = heatmap torch.max( masked_heatmap, masked_gaussian * k, out=out_heatmap[y - top:y + bottom, x - left:x + right]) return out_heatmap def gaussian_radius(det_size, min_overlap): r"""Generate 2D gaussian radius. This function is modified from the `official github repo `_. Given ``min_overlap``, radius could computed by a quadratic equation according to Vieta's formulas. There are 3 cases for computing gaussian radius, details are following: - Explanation of figure: ``lt`` and ``br`` indicates the left-top and bottom-right corner of ground truth box. ``x`` indicates the generated corner at the limited position when ``radius=r``. - Case1: one corner is inside the gt box and the other is outside. .. code:: text |< width >| lt-+----------+ - | | | ^ +--x----------+--+ | | | | | | | | height | | overlap | | | | | | | | | | v +--+---------br--+ - | | | +----------+--x To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \ge {iou} \quad\Rightarrow\quad {r^2-(w+h)r+\cfrac{1-iou}{1+iou}*w*h} \ge 0 \\ {a} = 1,\quad{b} = {-(w+h)},\quad{c} = {\cfrac{1-iou}{1+iou}*w*h} \\ {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - Case2: both two corners are inside the gt box. .. code:: text |< width >| lt-+----------+ - | | | ^ +--x-------+ | | | | | | |overlap| | height | | | | | +-------x--+ | | | v +----------+-br - To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{(w-2*r)*(h-2*r)}{w*h} \ge {iou} \quad\Rightarrow\quad {4r^2-2(w+h)r+(1-iou)*w*h} \ge 0 \\ {a} = 4,\quad {b} = {-2(w+h)},\quad {c} = {(1-iou)*w*h} \\ {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - Case3: both two corners are outside the gt box. .. code:: text |< width >| x--+----------------+ | | | +-lt-------------+ | - | | | | ^ | | | | | | overlap | | height | | | | | | | | v | +------------br--+ - | | | +----------------+--x To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{w*h}{(w+2*r)*(h+2*r)} \ge {iou} \quad\Rightarrow\quad {4*iou*r^2+2*iou*(w+h)r+(iou-1)*w*h} \le 0 \\ {a} = {4*iou},\quad {b} = {2*iou*(w+h)},\quad {c} = {(iou-1)*w*h} \\ {r} \le \cfrac{-b+\sqrt{b^2-4*a*c}}{2*a} Args: det_size (list[int]): Shape of object. min_overlap (float): Min IoU with ground truth for boxes generated by keypoints inside the gaussian kernel. Returns: radius (int): Radius of gaussian kernel. 
""" height, width = det_size a1 = 1 b1 = (height + width) c1 = width * height * (1 - min_overlap) / (1 + min_overlap) sq1 = sqrt(b1**2 - 4 * a1 * c1) r1 = (b1 - sq1) / (2 * a1) a2 = 4 b2 = 2 * (height + width) c2 = (1 - min_overlap) * width * height sq2 = sqrt(b2**2 - 4 * a2 * c2) r2 = (b2 - sq2) / (2 * a2) a3 = 4 * min_overlap b3 = -2 * min_overlap * (height + width) c3 = (min_overlap - 1) * width * height sq3 = sqrt(b3**2 - 4 * a3 * c3) r3 = (b3 + sq3) / (2 * a3) return min(r1, r2, r3) def get_local_maximum(heat, kernel=3): """Extract local maximum pixel with given kernel. Args: heat (Tensor): Target heatmap. kernel (int): Kernel size of max pooling. Default: 3. Returns: heat (Tensor): A heatmap where local maximum pixels maintain its own value and other positions are 0. """ pad = (kernel - 1) // 2 hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad) keep = (hmax == heat).float() return heat * keep def get_topk_from_heatmap(scores, k=20): """Get top k positions from heatmap. Args: scores (Tensor): Target heatmap with shape [batch, num_classes, height, width]. k (int): Target number. Default: 20. Returns: tuple[torch.Tensor]: Scores, indexes, categories and coords of topk keypoint. Containing following Tensors: - topk_scores (Tensor): Max scores of each topk keypoint. - topk_inds (Tensor): Indexes of each topk keypoint. - topk_clses (Tensor): Categories of each topk keypoint. - topk_ys (Tensor): Y-coord of each topk keypoint. - topk_xs (Tensor): X-coord of each topk keypoint. """ batch, _, height, width = scores.size() topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k) topk_clses = topk_inds // (height * width) topk_inds = topk_inds % (height * width) topk_ys = topk_inds // width topk_xs = (topk_inds % width).int().float() return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs def gather_feat(feat, ind, mask=None): """Gather feature according to index. Args: feat (Tensor): Target feature map. ind (Tensor): Target coord index. mask (Tensor | None): Mask of feature map. Default: None. Returns: feat (Tensor): Gathered feature. """ dim = feat.size(2) ind = ind.unsqueeze(2).repeat(1, 1, dim) feat = feat.gather(1, ind) if mask is not None: mask = mask.unsqueeze(2).expand_as(feat) feat = feat[mask] feat = feat.view(-1, dim) return feat def transpose_and_gather_feat(feat, ind): """Transpose and gather feature according to index. Args: feat (Tensor): Target feature map. ind (Tensor): Target coord index. Returns: feat (Tensor): Transposed and gathered feature. """ feat = feat.permute(0, 2, 3, 1).contiguous() feat = feat.view(feat.size(0), -1, feat.size(3)) feat = gather_feat(feat, ind) return feat ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/utils/inverted_residual.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn import torch.utils.checkpoint as cp from mmcv.cnn import ConvModule from mmcv.cnn.bricks import DropPath from mmcv.runner import BaseModule from .se_layer import SELayer class InvertedResidual(BaseModule): """Inverted Residual Block. Args: in_channels (int): The input channels of this Module. out_channels (int): The output channels of this Module. mid_channels (int): The input channels of the depthwise convolution. kernel_size (int): The kernel size of the depthwise convolution. Default: 3. stride (int): The stride of the depthwise convolution. Default: 1. se_cfg (dict): Config dict for se layer. Default: None, which means no se layer. 
with_expand_conv (bool): Use expand conv or not. If set False, mid_channels must be the same with in_channels. Default: True. conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU'). drop_path_rate (float): stochastic depth rate. Defaults to 0. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None Returns: Tensor: The output tensor. """ def __init__(self, in_channels, out_channels, mid_channels, kernel_size=3, stride=1, se_cfg=None, with_expand_conv=True, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), drop_path_rate=0., with_cp=False, init_cfg=None): super(InvertedResidual, self).__init__(init_cfg) self.with_res_shortcut = (stride == 1 and in_channels == out_channels) assert stride in [1, 2], f'stride must in [1, 2]. ' \ f'But received {stride}.' self.with_cp = with_cp self.drop_path = DropPath( drop_path_rate) if drop_path_rate > 0 else nn.Identity() self.with_se = se_cfg is not None self.with_expand_conv = with_expand_conv if self.with_se: assert isinstance(se_cfg, dict) if not self.with_expand_conv: assert mid_channels == in_channels if self.with_expand_conv: self.expand_conv = ConvModule( in_channels=in_channels, out_channels=mid_channels, kernel_size=1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.depthwise_conv = ConvModule( in_channels=mid_channels, out_channels=mid_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=mid_channels, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) if self.with_se: self.se = SELayer(**se_cfg) self.linear_conv = ConvModule( in_channels=mid_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None) def forward(self, x): def _inner_forward(x): out = x if self.with_expand_conv: out = self.expand_conv(out) out = self.depthwise_conv(out) if self.with_se: out = self.se(out) out = self.linear_conv(out) if self.with_res_shortcut: return x + self.drop_path(out) else: return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) return out ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/utils/make_divisible.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. def make_divisible(value, divisor, min_value=None, min_ratio=0.9): """Make divisible function. This function rounds the channel number to the nearest value that can be divisible by the divisor. It is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by divisor. It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py # noqa Args: value (int): The original channel number. divisor (int): The divisor to fully divide the channel number. min_value (int): The minimum value of the output channel. Default: None, means that the minimum value equal to the divisor. min_ratio (float): The minimum ratio of the rounded channel number to the original channel number. Default: 0.9. Returns: int: The modified output channel number. 
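    Example (illustrative)::

        >>> make_divisible(34, 8)
        32
        >>> make_divisible(37, 8)
        40
        >>> make_divisible(10, 8)  # 8 < 0.9 * 10, so bump up one divisor
        16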
""" if min_value is None: min_value = divisor new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than (1-min_ratio). if new_value < min_ratio * value: new_value += divisor return new_value ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/utils/misc.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from torch.autograd import Function from torch.nn import functional as F class SigmoidGeometricMean(Function): """Forward and backward function of geometric mean of two sigmoid functions. This implementation with analytical gradient function substitutes the autograd function of (x.sigmoid() * y.sigmoid()).sqrt(). The original implementation incurs none during gradient backprapagation if both x and y are very small values. """ @staticmethod def forward(ctx, x, y): x_sigmoid = x.sigmoid() y_sigmoid = y.sigmoid() z = (x_sigmoid * y_sigmoid).sqrt() ctx.save_for_backward(x_sigmoid, y_sigmoid, z) return z @staticmethod def backward(ctx, grad_output): x_sigmoid, y_sigmoid, z = ctx.saved_tensors grad_x = grad_output * z * (1 - x_sigmoid) / 2 grad_y = grad_output * z * (1 - y_sigmoid) / 2 return grad_x, grad_y sigmoid_geometric_mean = SigmoidGeometricMean.apply def interpolate_as(source, target, mode='bilinear', align_corners=False): """Interpolate the `source` to the shape of the `target`. The `source` must be a Tensor, but the `target` can be a Tensor or a np.ndarray with the shape (..., target_h, target_w). Args: source (Tensor): A 3D/4D Tensor with the shape (N, H, W) or (N, C, H, W). target (Tensor | np.ndarray): The interpolation target with the shape (..., target_h, target_w). mode (str): Algorithm used for interpolation. The options are the same as those in F.interpolate(). Default: ``'bilinear'``. align_corners (bool): The same as the argument in F.interpolate(). Returns: Tensor: The interpolated source Tensor. """ assert len(target.shape) >= 2 def _interpolate_as(source, target, mode='bilinear', align_corners=False): """Interpolate the `source` (4D) to the shape of the `target`.""" target_h, target_w = target.shape[-2:] source_h, source_w = source.shape[-2:] if target_h != source_h or target_w != source_w: source = F.interpolate( source, size=(target_h, target_w), mode=mode, align_corners=align_corners) return source if len(source.shape) == 3: source = source[:, None, :, :] source = _interpolate_as(source, target, mode, align_corners) return source[:, 0, :, :] else: return _interpolate_as(source, target, mode, align_corners) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/utils/normed_predictor.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import CONV_LAYERS from .builder import LINEAR_LAYERS @LINEAR_LAYERS.register_module(name='NormedLinear') class NormedLinear(nn.Linear): """Normalized Linear Layer. Args: tempeature (float, optional): Tempeature term. Default to 20. power (int, optional): Power term. Default to 1.0. eps (float, optional): The minimal value of divisor to keep numerical stability. Default to 1e-6. 
""" def __init__(self, *args, tempearture=20, power=1.0, eps=1e-6, **kwargs): super(NormedLinear, self).__init__(*args, **kwargs) self.tempearture = tempearture self.power = power self.eps = eps self.init_weights() def init_weights(self): nn.init.normal_(self.weight, mean=0, std=0.01) if self.bias is not None: nn.init.constant_(self.bias, 0) def forward(self, x): weight_ = self.weight / ( self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps) x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps) x_ = x_ * self.tempearture return F.linear(x_, weight_, self.bias) @CONV_LAYERS.register_module(name='NormedConv2d') class NormedConv2d(nn.Conv2d): """Normalized Conv2d Layer. Args: tempeature (float, optional): Tempeature term. Default to 20. power (int, optional): Power term. Default to 1.0. eps (float, optional): The minimal value of divisor to keep numerical stability. Default to 1e-6. norm_over_kernel (bool, optional): Normalize over kernel. Default to False. """ def __init__(self, *args, tempearture=20, power=1.0, eps=1e-6, norm_over_kernel=False, **kwargs): super(NormedConv2d, self).__init__(*args, **kwargs) self.tempearture = tempearture self.power = power self.norm_over_kernel = norm_over_kernel self.eps = eps def forward(self, x): if not self.norm_over_kernel: weight_ = self.weight / ( self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps) else: weight_ = self.weight / ( self.weight.view(self.weight.size(0), -1).norm( dim=1, keepdim=True).pow(self.power)[..., None, None] + self.eps) x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps) x_ = x_ * self.tempearture if hasattr(self, 'conv2d_forward'): x_ = self.conv2d_forward(x_, weight_) else: if torch.__version__ >= '1.8': x_ = self._conv_forward(x_, weight_, self.bias) else: x_ = self._conv_forward(x_, weight_) return x_ ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/utils/panoptic_gt_processing.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch def preprocess_panoptic_gt(gt_labels, gt_masks, gt_semantic_seg, num_things, num_stuff, img_metas): """Preprocess the ground truth for a image. Args: gt_labels (Tensor): Ground truth labels of each bbox, with shape (num_gts, ). gt_masks (BitmapMasks): Ground truth masks of each instances of a image, shape (num_gts, h, w). gt_semantic_seg (Tensor | None): Ground truth of semantic segmentation with the shape (1, h, w). [0, num_thing_class - 1] means things, [num_thing_class, num_class-1] means stuff, 255 means VOID. It's None when training instance segmentation. img_metas (dict): List of image meta information. Returns: tuple: a tuple containing the following targets. - labels (Tensor): Ground truth class indices for a image, with shape (n, ), n is the sum of number of stuff type and number of instance in a image. - masks (Tensor): Ground truth mask for a image, with shape (n, h, w). Contains stuff and things when training panoptic segmentation, and things only when training instance segmentation. 
""" num_classes = num_things + num_stuff things_masks = gt_masks.pad(img_metas['pad_shape'][:2], pad_val=0)\ .to_tensor(dtype=torch.bool, device=gt_labels.device) if gt_semantic_seg is None: masks = things_masks.long() return gt_labels, masks things_labels = gt_labels gt_semantic_seg = gt_semantic_seg.squeeze(0) semantic_labels = torch.unique( gt_semantic_seg, sorted=False, return_inverse=False, return_counts=False) stuff_masks_list = [] stuff_labels_list = [] for label in semantic_labels: if label < num_things or label >= num_classes: continue stuff_mask = gt_semantic_seg == label stuff_masks_list.append(stuff_mask) stuff_labels_list.append(label) if len(stuff_masks_list) > 0: stuff_masks = torch.stack(stuff_masks_list, dim=0) stuff_labels = torch.stack(stuff_labels_list, dim=0) labels = torch.cat([things_labels, stuff_labels], dim=0) masks = torch.cat([things_masks, stuff_masks], dim=0) else: labels = things_labels masks = things_masks masks = masks.long() return labels, masks ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/utils/point_sample.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.ops import point_sample def get_uncertainty(mask_pred, labels): """Estimate uncertainty based on pred logits. We estimate uncertainty as L1 distance between 0.0 and the logits prediction in 'mask_pred' for the foreground class in `classes`. Args: mask_pred (Tensor): mask predication logits, shape (num_rois, num_classes, mask_height, mask_width). labels (list[Tensor]): Either predicted or ground truth label for each predicted mask, of length num_rois. Returns: scores (Tensor): Uncertainty scores with the most uncertain locations having the highest uncertainty score, shape (num_rois, 1, mask_height, mask_width) """ if mask_pred.shape[1] == 1: gt_class_logits = mask_pred.clone() else: inds = torch.arange(mask_pred.shape[0], device=mask_pred.device) gt_class_logits = mask_pred[inds, labels].unsqueeze(1) return -torch.abs(gt_class_logits) def get_uncertain_point_coords_with_randomness(mask_pred, labels, num_points, oversample_ratio, importance_sample_ratio): """Get ``num_points`` most uncertain points with random points during train. Sample points in [0, 1] x [0, 1] coordinate space based on their uncertainty. The uncertainties are calculated for each point using 'get_uncertainty()' function that takes point's logit prediction as input. Args: mask_pred (Tensor): A tensor of shape (num_rois, num_classes, mask_height, mask_width) for class-specific or class-agnostic prediction. labels (list): The ground truth class for each instance. num_points (int): The number of points to sample. oversample_ratio (int): Oversampling parameter. importance_sample_ratio (float): Ratio of points that are sampled via importnace sampling. Returns: point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) that contains the coordinates sampled points. """ assert oversample_ratio >= 1 assert 0 <= importance_sample_ratio <= 1 batch_size = mask_pred.shape[0] num_sampled = int(num_points * oversample_ratio) point_coords = torch.rand( batch_size, num_sampled, 2, device=mask_pred.device) point_logits = point_sample(mask_pred, point_coords) # It is crucial to calculate uncertainty based on the sampled # prediction value for the points. Calculating uncertainties of the # coarse predictions first and sampling them for points leads to # incorrect results. 
To illustrate this: assume uncertainty func( # logits)=-abs(logits), a sampled point between two coarse # predictions with -1 and 1 logits has 0 logits, and therefore 0 # uncertainty value. However, if we calculate uncertainties for the # coarse predictions first, both will have -1 uncertainty, # and sampled point will get -1 uncertainty. point_uncertainties = get_uncertainty(point_logits, labels) num_uncertain_points = int(importance_sample_ratio * num_points) num_random_points = num_points - num_uncertain_points idx = torch.topk( point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] shift = num_sampled * torch.arange( batch_size, dtype=torch.long, device=mask_pred.device) idx += shift[:, None] point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( batch_size, num_uncertain_points, 2) if num_random_points > 0: rand_roi_coords = torch.rand( batch_size, num_random_points, 2, device=mask_pred.device) point_coords = torch.cat((point_coords, rand_roi_coords), dim=1) return point_coords ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/utils/positional_encoding.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import math import torch import torch.nn as nn from mmcv.cnn.bricks.transformer import POSITIONAL_ENCODING from mmcv.runner import BaseModule @POSITIONAL_ENCODING.register_module() class SinePositionalEncoding(BaseModule): """Position encoding with sine and cosine functions. See `End-to-End Object Detection with Transformers `_ for details. Args: num_feats (int): The feature dimension for each position along x-axis or y-axis. Note the final returned dimension for each position is 2 times of this value. temperature (int, optional): The temperature used for scaling the position embedding. Defaults to 10000. normalize (bool, optional): Whether to normalize the position embedding. Defaults to False. scale (float, optional): A scale factor that scales the position embedding. The scale will be used only when `normalize` is True. Defaults to 2*pi. eps (float, optional): A value added to the denominator for numerical stability. Defaults to 1e-6. offset (float): offset add to embed when do the normalization. Defaults to 0. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, num_feats, temperature=10000, normalize=False, scale=2 * math.pi, eps=1e-6, offset=0., init_cfg=None): super(SinePositionalEncoding, self).__init__(init_cfg) if normalize: assert isinstance(scale, (float, int)), 'when normalize is set,' \ 'scale should be provided and in float or int type, ' \ f'found {type(scale)}' self.num_feats = num_feats self.temperature = temperature self.normalize = normalize self.scale = scale self.eps = eps self.offset = offset def forward(self, mask): """Forward function for `SinePositionalEncoding`. Args: mask (Tensor): ByteTensor mask. Non-zero values representing ignored positions, while zero values means valid positions for this image. Shape [bs, h, w]. Returns: pos (Tensor): Returned position embedding with shape [bs, num_feats*2, h, w]. """ # For convenience of exporting to ONNX, it's required to convert # `masks` from bool to int. 
mask = mask.to(torch.int) not_mask = 1 - mask # logical_not y_embed = not_mask.cumsum(1, dtype=torch.float32) x_embed = not_mask.cumsum(2, dtype=torch.float32) if self.normalize: y_embed = (y_embed + self.offset) / \ (y_embed[:, -1:, :] + self.eps) * self.scale x_embed = (x_embed + self.offset) / \ (x_embed[:, :, -1:] + self.eps) * self.scale dim_t = torch.arange( self.num_feats, dtype=torch.float32, device=mask.device) dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t # use `view` instead of `flatten` for dynamically exporting to ONNX B, H, W = mask.size() pos_x = torch.stack( (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).view(B, H, W, -1) pos_y = torch.stack( (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).view(B, H, W, -1) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos def __repr__(self): """str: a string that describes the module""" repr_str = self.__class__.__name__ repr_str += f'(num_feats={self.num_feats}, ' repr_str += f'temperature={self.temperature}, ' repr_str += f'normalize={self.normalize}, ' repr_str += f'scale={self.scale}, ' repr_str += f'eps={self.eps})' return repr_str @POSITIONAL_ENCODING.register_module() class LearnedPositionalEncoding(BaseModule): """Position embedding with learnable embedding weights. Args: num_feats (int): The feature dimension for each position along x-axis or y-axis. The final returned dimension for each position is 2 times of this value. row_num_embed (int, optional): The dictionary size of row embeddings. Default 50. col_num_embed (int, optional): The dictionary size of col embeddings. Default 50. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, num_feats, row_num_embed=50, col_num_embed=50, init_cfg=dict(type='Uniform', layer='Embedding')): super(LearnedPositionalEncoding, self).__init__(init_cfg) self.row_embed = nn.Embedding(row_num_embed, num_feats) self.col_embed = nn.Embedding(col_num_embed, num_feats) self.num_feats = num_feats self.row_num_embed = row_num_embed self.col_num_embed = col_num_embed def forward(self, mask): """Forward function for `LearnedPositionalEncoding`. Args: mask (Tensor): ByteTensor mask. Non-zero values representing ignored positions, while zero values means valid positions for this image. Shape [bs, h, w]. Returns: pos (Tensor): Returned position embedding with shape [bs, num_feats*2, h, w]. """ h, w = mask.shape[-2:] x = torch.arange(w, device=mask.device) y = torch.arange(h, device=mask.device) x_embed = self.col_embed(x) y_embed = self.row_embed(y) pos = torch.cat( (x_embed.unsqueeze(0).repeat(h, 1, 1), y_embed.unsqueeze(1).repeat( 1, w, 1)), dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1) return pos def __repr__(self): """str: a string that describes the module""" repr_str = self.__class__.__name__ repr_str += f'(num_feats={self.num_feats}, ' repr_str += f'row_num_embed={self.row_num_embed}, ' repr_str += f'col_num_embed={self.col_num_embed})' return repr_str ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/utils/res_layer.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from mmcv.cnn import build_conv_layer, build_norm_layer from mmcv.runner import BaseModule, Sequential from torch import nn as nn class ResLayer(Sequential): """ResLayer to build ResNet style backbone. 
Args: block (nn.Module): block used to build ResLayer. inplanes (int): inplanes of block. planes (int): planes of block. num_blocks (int): number of blocks. stride (int): stride of the first block. Default: 1 avg_down (bool): Use AvgPool instead of stride conv when downsampling in the bottleneck. Default: False conv_cfg (dict): dictionary to construct and config conv layer. Default: None norm_cfg (dict): dictionary to construct and config norm layer. Default: dict(type='BN') downsample_first (bool): Downsample at the first block or last block. False for Hourglass, True for ResNet. Default: True """ def __init__(self, block, inplanes, planes, num_blocks, stride=1, avg_down=False, conv_cfg=None, norm_cfg=dict(type='BN'), downsample_first=True, **kwargs): self.block = block downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = [] conv_stride = stride if avg_down: conv_stride = 1 downsample.append( nn.AvgPool2d( kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False)) downsample.extend([ build_conv_layer( conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=conv_stride, bias=False), build_norm_layer(norm_cfg, planes * block.expansion)[1] ]) downsample = nn.Sequential(*downsample) layers = [] if downsample_first: layers.append( block( inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) inplanes = planes * block.expansion for _ in range(1, num_blocks): layers.append( block( inplanes=inplanes, planes=planes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) else: # downsample_first=False is for HourglassModule for _ in range(num_blocks - 1): layers.append( block( inplanes=inplanes, planes=inplanes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) layers.append( block( inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) super(ResLayer, self).__init__(*layers) class SimplifiedBasicBlock(BaseModule): """Simplified version of original basic residual block. This is used in `SCNet `_. - Norm layer is now optional - Last ReLU in forward function is removed """ expansion = 1 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, init_fg=None): super(SimplifiedBasicBlock, self).__init__(init_fg) assert dcn is None, 'Not implemented yet.' assert plugins is None, 'Not implemented yet.' assert not with_cp, 'Not implemented yet.' 
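        # --- Illustrative sketch (not part of the original file) ---
        # `ResLayer` above stacks `num_blocks` residual blocks; e.g. a
        # hypothetical ResNet stage could be built as:
        #
        #     from mmdet.models.backbones.resnet import BasicBlock
        #     stage = ResLayer(BasicBlock, inplanes=64, planes=128,
        #                      num_blocks=2, stride=2)
        #
        # which downsamples in the first block and keeps stride 1 after.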
self.with_norm = norm_cfg is not None with_bias = True if norm_cfg is None else False self.conv1 = build_conv_layer( conv_cfg, inplanes, planes, 3, stride=stride, padding=dilation, dilation=dilation, bias=with_bias) if self.with_norm: self.norm1_name, norm1 = build_norm_layer( norm_cfg, planes, postfix=1) self.add_module(self.norm1_name, norm1) self.conv2 = build_conv_layer( conv_cfg, planes, planes, 3, padding=1, bias=with_bias) if self.with_norm: self.norm2_name, norm2 = build_norm_layer( norm_cfg, planes, postfix=2) self.add_module(self.norm2_name, norm2) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation self.with_cp = with_cp @property def norm1(self): """nn.Module: normalization layer after the first convolution layer""" return getattr(self, self.norm1_name) if self.with_norm else None @property def norm2(self): """nn.Module: normalization layer after the second convolution layer""" return getattr(self, self.norm2_name) if self.with_norm else None def forward(self, x): """Forward function.""" identity = x out = self.conv1(x) if self.with_norm: out = self.norm1(out) out = self.relu(out) out = self.conv2(out) if self.with_norm: out = self.norm2(out) if self.downsample is not None: identity = self.downsample(x) out += identity return out ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/utils/se_layer.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule class SELayer(BaseModule): """Squeeze-and-Excitation Module. Args: channels (int): The input (and output) channels of the SE layer. ratio (int): Squeeze ratio in SELayer, the intermediate channel will be ``int(channels/ratio)``. Default: 16. conv_cfg (None or dict): Config dict for convolution layer. Default: None, which means using conv2d. act_cfg (dict or Sequence[dict]): Config dict for activation layer. If act_cfg is a dict, two activation layers will be configurated by this dict. If act_cfg is a sequence of dicts, the first activation layer will be configurated by the first dict and the second activation layer will be configurated by the second dict. Default: (dict(type='ReLU'), dict(type='Sigmoid')) init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, channels, ratio=16, conv_cfg=None, act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')), init_cfg=None): super(SELayer, self).__init__(init_cfg) if isinstance(act_cfg, dict): act_cfg = (act_cfg, act_cfg) assert len(act_cfg) == 2 assert mmcv.is_tuple_of(act_cfg, dict) self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.conv1 = ConvModule( in_channels=channels, out_channels=int(channels / ratio), kernel_size=1, stride=1, conv_cfg=conv_cfg, act_cfg=act_cfg[0]) self.conv2 = ConvModule( in_channels=int(channels / ratio), out_channels=channels, kernel_size=1, stride=1, conv_cfg=conv_cfg, act_cfg=act_cfg[1]) def forward(self, x): out = self.global_avgpool(x) out = self.conv1(out) out = self.conv2(out) return x * out class DyReLU(BaseModule): """Dynamic ReLU (DyReLU) module. See `Dynamic ReLU `_ for details. Current implementation is specialized for task-aware attention in DyHead. HSigmoid arguments in default act_cfg follow DyHead official code. https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py Args: channels (int): The input (and output) channels of DyReLU module. 
ratio (int): Squeeze ratio in Squeeze-and-Excitation-like module, the intermediate channel will be ``int(channels/ratio)``. Default: 4. conv_cfg (None or dict): Config dict for convolution layer. Default: None, which means using conv2d. act_cfg (dict or Sequence[dict]): Config dict for activation layer. If act_cfg is a dict, two activation layers will be configurated by this dict. If act_cfg is a sequence of dicts, the first activation layer will be configurated by the first dict and the second activation layer will be configurated by the second dict. Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0, divisor=6.0)) init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, channels, ratio=4, conv_cfg=None, act_cfg=(dict(type='ReLU'), dict(type='HSigmoid', bias=3.0, divisor=6.0)), init_cfg=None): super().__init__(init_cfg=init_cfg) if isinstance(act_cfg, dict): act_cfg = (act_cfg, act_cfg) assert len(act_cfg) == 2 assert mmcv.is_tuple_of(act_cfg, dict) self.channels = channels self.expansion = 4 # for a1, b1, a2, b2 self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.conv1 = ConvModule( in_channels=channels, out_channels=int(channels / ratio), kernel_size=1, stride=1, conv_cfg=conv_cfg, act_cfg=act_cfg[0]) self.conv2 = ConvModule( in_channels=int(channels / ratio), out_channels=channels * self.expansion, kernel_size=1, stride=1, conv_cfg=conv_cfg, act_cfg=act_cfg[1]) def forward(self, x): """Forward function.""" coeffs = self.global_avgpool(x) coeffs = self.conv1(coeffs) coeffs = self.conv2(coeffs) - 0.5 # value range: [-0.5, 0.5] a1, b1, a2, b2 = torch.split(coeffs, self.channels, dim=1) a1 = a1 * 2.0 + 1.0 # [-1.0, 1.0] + 1.0 a2 = a2 * 2.0 # [-1.0, 1.0] out = torch.max(x * a1 + b1, x * a2 + b2) return out ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/models/utils/transformer.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import math import warnings from typing import Sequence import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import (build_activation_layer, build_conv_layer, build_norm_layer, xavier_init) from mmcv.cnn.bricks.registry import (TRANSFORMER_LAYER, TRANSFORMER_LAYER_SEQUENCE) from mmcv.cnn.bricks.transformer import (BaseTransformerLayer, TransformerLayerSequence, build_transformer_layer_sequence) from mmcv.runner.base_module import BaseModule from mmcv.utils import to_2tuple from torch.nn.init import normal_ from mmdet.models.utils.builder import TRANSFORMER try: from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention except ImportError: warnings.warn( '`MultiScaleDeformableAttention` in MMCV has been moved to ' '`mmcv.ops.multi_scale_deform_attn`, please update your MMCV') from mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention def nlc_to_nchw(x, hw_shape): """Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. Args: x (Tensor): The input tensor of shape [N, L, C] before conversion. hw_shape (Sequence[int]): The height and width of output feature map. Returns: Tensor: The output tensor of shape [N, C, H, W] after conversion. """ H, W = hw_shape assert len(x.shape) == 3 B, L, C = x.shape assert L == H * W, 'The seq_len does not match H, W' return x.transpose(1, 2).reshape(B, C, H, W).contiguous() def nchw_to_nlc(x): """Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor. Args: x (Tensor): The input tensor of shape [N, C, H, W] before conversion. 
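# Sketch of the task-aware activation computed by DyReLU.forward above: four
# per-channel coefficients are predicted, rescaled, and combined with an
# element-wise max. A standalone snippet; the coefficients are faked with
# random values here just to show the shapes and value ranges.
import torch

B, C, H, W = 2, 8, 4, 4
x = torch.randn(B, C, H, W)
coeffs = torch.rand(B, 4 * C, 1, 1) - 0.5       # conv output, range [-0.5, 0.5]
a1, b1, a2, b2 = torch.split(coeffs, C, dim=1)
a1 = a1 * 2.0 + 1.0                             # slopes centered on 1.0
a2 = a2 * 2.0                                   # slopes centered on 0.0
out = torch.max(x * a1 + b1, x * a2 + b2)       # piecewise-linear activation
assert out.shape == x.shape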
Returns: Tensor: The output tensor of shape [N, L, C] after conversion. """ assert len(x.shape) == 4 return x.flatten(2).transpose(1, 2).contiguous() class AdaptivePadding(nn.Module): """Applies padding to input (if needed) so that input can get fully covered by filter you specified. It support two modes "same" and "corner". The "same" mode is same with "SAME" padding mode in TensorFlow, pad zero around input. The "corner" mode would pad zero to bottom right. Args: kernel_size (int | tuple): Size of the kernel: stride (int | tuple): Stride of the filter. Default: 1: dilation (int | tuple): Spacing between kernel elements. Default: 1 padding (str): Support "same" and "corner", "corner" mode would pad zero to bottom right, and "same" mode would pad zero around input. Default: "corner". Example: >>> kernel_size = 16 >>> stride = 16 >>> dilation = 1 >>> input = torch.rand(1, 1, 15, 17) >>> adap_pad = AdaptivePadding( >>> kernel_size=kernel_size, >>> stride=stride, >>> dilation=dilation, >>> padding="corner") >>> out = adap_pad(input) >>> assert (out.shape[2], out.shape[3]) == (16, 32) >>> input = torch.rand(1, 1, 16, 17) >>> out = adap_pad(input) >>> assert (out.shape[2], out.shape[3]) == (16, 32) """ def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'): super(AdaptivePadding, self).__init__() assert padding in ('same', 'corner') kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) padding = to_2tuple(padding) dilation = to_2tuple(dilation) self.padding = padding self.kernel_size = kernel_size self.stride = stride self.dilation = dilation def get_pad_shape(self, input_shape): input_h, input_w = input_shape kernel_h, kernel_w = self.kernel_size stride_h, stride_w = self.stride output_h = math.ceil(input_h / stride_h) output_w = math.ceil(input_w / stride_w) pad_h = max((output_h - 1) * stride_h + (kernel_h - 1) * self.dilation[0] + 1 - input_h, 0) pad_w = max((output_w - 1) * stride_w + (kernel_w - 1) * self.dilation[1] + 1 - input_w, 0) return pad_h, pad_w def forward(self, x): pad_h, pad_w = self.get_pad_shape(x.size()[-2:]) if pad_h > 0 or pad_w > 0: if self.padding == 'corner': x = F.pad(x, [0, pad_w, 0, pad_h]) elif self.padding == 'same': x = F.pad(x, [ pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2 ]) return x class PatchEmbed(BaseModule): """Image to Patch Embedding. We use a conv layer to implement PatchEmbed. Args: in_channels (int): The num of input channels. Default: 3 embed_dims (int): The dimensions of embedding. Default: 768 conv_type (str): The config dict for embedding conv layer type selection. Default: "Conv2d. kernel_size (int): The kernel_size of embedding conv. Default: 16. stride (int): The slide stride of embedding conv. Default: None (Would be set as `kernel_size`). padding (int | tuple | string ): The padding length of embedding conv. When it is a string, it means the mode of adaptive padding, support "same" and "corner" now. Default: "corner". dilation (int): The dilation rate of embedding conv. Default: 1. bias (bool): Bias of embed conv. Default: True. norm_cfg (dict, optional): Config dict for normalization layer. Default: None. input_size (int | tuple | None): The size of input, which will be used to calculate the out size. Only work when `dynamic_size` is False. Default: None. init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. Default: None. 
""" def __init__( self, in_channels=3, embed_dims=768, conv_type='Conv2d', kernel_size=16, stride=16, padding='corner', dilation=1, bias=True, norm_cfg=None, input_size=None, init_cfg=None, ): super(PatchEmbed, self).__init__(init_cfg=init_cfg) self.embed_dims = embed_dims if stride is None: stride = kernel_size kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) dilation = to_2tuple(dilation) if isinstance(padding, str): self.adap_padding = AdaptivePadding( kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding) # disable the padding of conv padding = 0 else: self.adap_padding = None padding = to_2tuple(padding) self.projection = build_conv_layer( dict(type=conv_type), in_channels=in_channels, out_channels=embed_dims, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) if norm_cfg is not None: self.norm = build_norm_layer(norm_cfg, embed_dims)[1] else: self.norm = None if input_size: input_size = to_2tuple(input_size) # `init_out_size` would be used outside to # calculate the num_patches # when `use_abs_pos_embed` outside self.init_input_size = input_size if self.adap_padding: pad_h, pad_w = self.adap_padding.get_pad_shape(input_size) input_h, input_w = input_size input_h = input_h + pad_h input_w = input_w + pad_w input_size = (input_h, input_w) # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html h_out = (input_size[0] + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1) // stride[0] + 1 w_out = (input_size[1] + 2 * padding[1] - dilation[1] * (kernel_size[1] - 1) - 1) // stride[1] + 1 self.init_out_size = (h_out, w_out) else: self.init_input_size = None self.init_out_size = None def forward(self, x): """ Args: x (Tensor): Has shape (B, C, H, W). In most case, C is 3. Returns: tuple: Contains merged results and its spatial shape. - x (Tensor): Has shape (B, out_h * out_w, embed_dims) - out_size (tuple[int]): Spatial shape of x, arrange as (out_h, out_w). """ if self.adap_padding: x = self.adap_padding(x) x = self.projection(x) out_size = (x.shape[2], x.shape[3]) x = x.flatten(2).transpose(1, 2) if self.norm is not None: x = self.norm(x) return x, out_size class PatchMerging(BaseModule): """Merge patch feature map. This layer groups feature map by kernel_size, and applies norm and linear layers to the grouped feature map. Our implementation uses `nn.Unfold` to merge patch, which is about 25% faster than original implementation. Instead, we need to modify pretrained models for compatibility. Args: in_channels (int): The num of input channels. to gets fully covered by filter and stride you specified.. Default: True. out_channels (int): The num of output channels. kernel_size (int | tuple, optional): the kernel size in the unfold layer. Defaults to 2. stride (int | tuple, optional): the stride of the sliding blocks in the unfold layer. Default: None. (Would be set as `kernel_size`) padding (int | tuple | string ): The padding length of embedding conv. When it is a string, it means the mode of adaptive padding, support "same" and "corner" now. Default: "corner". dilation (int | tuple, optional): dilation parameter in the unfold layer. Default: 1. bias (bool, optional): Whether to add bias in linear layer or not. Defaults: False. norm_cfg (dict, optional): Config dict for normalization layer. Default: dict(type='LN'). init_cfg (dict, optional): The extra config for initialization. Default: None. 
""" def __init__(self, in_channels, out_channels, kernel_size=2, stride=None, padding='corner', dilation=1, bias=False, norm_cfg=dict(type='LN'), init_cfg=None): super().__init__(init_cfg=init_cfg) self.in_channels = in_channels self.out_channels = out_channels if stride: stride = stride else: stride = kernel_size kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) dilation = to_2tuple(dilation) if isinstance(padding, str): self.adap_padding = AdaptivePadding( kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding) # disable the padding of unfold padding = 0 else: self.adap_padding = None padding = to_2tuple(padding) self.sampler = nn.Unfold( kernel_size=kernel_size, dilation=dilation, padding=padding, stride=stride) sample_dim = kernel_size[0] * kernel_size[1] * in_channels if norm_cfg is not None: self.norm = build_norm_layer(norm_cfg, sample_dim)[1] else: self.norm = None self.reduction = nn.Linear(sample_dim, out_channels, bias=bias) def forward(self, x, input_size): """ Args: x (Tensor): Has shape (B, H*W, C_in). input_size (tuple[int]): The spatial shape of x, arrange as (H, W). Default: None. Returns: tuple: Contains merged results and its spatial shape. - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) - out_size (tuple[int]): Spatial shape of x, arrange as (Merged_H, Merged_W). """ B, L, C = x.shape assert isinstance(input_size, Sequence), f'Expect ' \ f'input_size is ' \ f'`Sequence` ' \ f'but get {input_size}' H, W = input_size assert L == H * W, 'input feature has wrong size' x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W # Use nn.Unfold to merge patch. About 25% faster than original method, # but need to modify pretrained model for compatibility if self.adap_padding: x = self.adap_padding(x) H, W = x.shape[-2:] x = self.sampler(x) # if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2) out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] * (self.sampler.kernel_size[0] - 1) - 1) // self.sampler.stride[0] + 1 out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] * (self.sampler.kernel_size[1] - 1) - 1) // self.sampler.stride[1] + 1 output_size = (out_h, out_w) x = x.transpose(1, 2) # B, H/2*W/2, 4*C x = self.norm(x) if self.norm else x x = self.reduction(x) return x, output_size def inverse_sigmoid(x, eps=1e-5): """Inverse function of sigmoid. Args: x (Tensor): The tensor to do the inverse. eps (float): EPS avoid numerical overflow. Defaults 1e-5. Returns: Tensor: The x has passed the inverse function of sigmoid, has same shape with input. """ x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - x).clamp(min=eps) return torch.log(x1 / x2) @TRANSFORMER_LAYER.register_module() class DetrTransformerDecoderLayer(BaseTransformerLayer): """Implements decoder layer in DETR transformer. Args: attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )): Configs for self_attention or cross_attention, the order should be consistent with it in `operation_order`. If it is a dict, it would be expand to the number of attention in `operation_order`. feedforward_channels (int): The hidden dimension for FFNs. ffn_dropout (float): Probability of an element to be zeroed in ffn. Default 0.0. operation_order (tuple[str]): The execution order of operation in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). Default:None act_cfg (dict): The activation config for FFNs. Default: `LN` norm_cfg (dict): Config dict for normalization layer. Default: `LN`. 
ffn_num_fcs (int): The number of fully-connected layers in FFNs. Default:2. """ def __init__(self, attn_cfgs, feedforward_channels, ffn_dropout=0.0, operation_order=None, act_cfg=dict(type='ReLU', inplace=True), norm_cfg=dict(type='LN'), ffn_num_fcs=2, **kwargs): super(DetrTransformerDecoderLayer, self).__init__( attn_cfgs=attn_cfgs, feedforward_channels=feedforward_channels, ffn_dropout=ffn_dropout, operation_order=operation_order, act_cfg=act_cfg, norm_cfg=norm_cfg, ffn_num_fcs=ffn_num_fcs, **kwargs) assert len(operation_order) == 6 assert set(operation_order) == set( ['self_attn', 'norm', 'cross_attn', 'ffn']) @TRANSFORMER_LAYER_SEQUENCE.register_module() class DetrTransformerEncoder(TransformerLayerSequence): """TransformerEncoder of DETR. Args: post_norm_cfg (dict): Config of last normalization layer. Default: `LN`. Only used when `self.pre_norm` is `True` """ def __init__(self, *args, post_norm_cfg=dict(type='LN'), **kwargs): super(DetrTransformerEncoder, self).__init__(*args, **kwargs) if post_norm_cfg is not None: self.post_norm = build_norm_layer( post_norm_cfg, self.embed_dims)[1] if self.pre_norm else None else: assert not self.pre_norm, f'Use prenorm in ' \ f'{self.__class__.__name__},' \ f'Please specify post_norm_cfg' self.post_norm = None def forward(self, *args, **kwargs): """Forward function for `TransformerCoder`. Returns: Tensor: forwarded results with shape [num_query, bs, embed_dims]. """ x = super(DetrTransformerEncoder, self).forward(*args, **kwargs) if self.post_norm is not None: x = self.post_norm(x) return x @TRANSFORMER_LAYER_SEQUENCE.register_module() class DetrTransformerDecoder(TransformerLayerSequence): """Implements the decoder in DETR transformer. Args: return_intermediate (bool): Whether to return intermediate outputs. post_norm_cfg (dict): Config of last normalization layer. Default: `LN`. """ def __init__(self, *args, post_norm_cfg=dict(type='LN'), return_intermediate=False, **kwargs): super(DetrTransformerDecoder, self).__init__(*args, **kwargs) self.return_intermediate = return_intermediate if post_norm_cfg is not None: self.post_norm = build_norm_layer(post_norm_cfg, self.embed_dims)[1] else: self.post_norm = None def forward(self, query, *args, **kwargs): """Forward function for `TransformerDecoder`. Args: query (Tensor): Input query with shape `(num_query, bs, embed_dims)`. Returns: Tensor: Results with shape [1, num_query, bs, embed_dims] when return_intermediate is `False`, otherwise it has shape [num_layers, num_query, bs, embed_dims]. """ if not self.return_intermediate: x = super().forward(query, *args, **kwargs) if self.post_norm: x = self.post_norm(x)[None] return x intermediate = [] for layer in self.layers: query = layer(query, *args, **kwargs) if self.return_intermediate: if self.post_norm is not None: intermediate.append(self.post_norm(query)) else: intermediate.append(query) return torch.stack(intermediate) @TRANSFORMER.register_module() class Transformer(BaseModule): """Implements the DETR transformer. Following the official DETR implementation, this module copy-paste from torch.nn.Transformer with modifications: * positional encodings are passed in MultiheadAttention * extra LN at the end of encoder is removed * decoder returns a stack of activations from all decoding layers See `paper: End-to-End Object Detection with Transformers `_ for details. Args: encoder (`mmcv.ConfigDict` | Dict): Config of TransformerEncoder. Defaults to None. decoder ((`mmcv.ConfigDict` | Dict)): Config of TransformerDecoder. 
Defaults to None init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. Defaults to None. """ def __init__(self, encoder=None, decoder=None, init_cfg=None): super(Transformer, self).__init__(init_cfg=init_cfg) self.encoder = build_transformer_layer_sequence(encoder) self.decoder = build_transformer_layer_sequence(decoder) self.embed_dims = self.encoder.embed_dims def init_weights(self): # follow the official DETR to init parameters for m in self.modules(): if hasattr(m, 'weight') and m.weight.dim() > 1: xavier_init(m, distribution='uniform') self._is_init = True def forward(self, x, mask, query_embed, pos_embed): """Forward function for `Transformer`. Args: x (Tensor): Input query with shape [bs, c, h, w] where c = embed_dims. mask (Tensor): The key_padding_mask used for encoder and decoder, with shape [bs, h, w]. query_embed (Tensor): The query embedding for decoder, with shape [num_query, c]. pos_embed (Tensor): The positional encoding for encoder and decoder, with the same shape as `x`. Returns: tuple[Tensor]: results of decoder containing the following tensor. - out_dec: Output from decoder. If return_intermediate_dec \ is True output has shape [num_dec_layers, bs, num_query, embed_dims], else has shape [1, bs, \ num_query, embed_dims]. - memory: Output results from encoder, with shape \ [bs, embed_dims, h, w]. """ bs, c, h, w = x.shape # use `view` instead of `flatten` for dynamically exporting to ONNX x = x.view(bs, c, -1).permute(2, 0, 1) # [bs, c, h, w] -> [h*w, bs, c] pos_embed = pos_embed.view(bs, c, -1).permute(2, 0, 1) query_embed = query_embed.unsqueeze(1).repeat( 1, bs, 1) # [num_query, dim] -> [num_query, bs, dim] mask = mask.view(bs, -1) # [bs, h, w] -> [bs, h*w] memory = self.encoder( query=x, key=None, value=None, query_pos=pos_embed, query_key_padding_mask=mask) target = torch.zeros_like(query_embed) # out_dec: [num_layers, num_query, bs, dim] out_dec = self.decoder( query=target, key=memory, value=memory, key_pos=pos_embed, query_pos=query_embed, key_padding_mask=mask) out_dec = out_dec.transpose(1, 2) memory = memory.permute(1, 2, 0).reshape(bs, c, h, w) return out_dec, memory @TRANSFORMER_LAYER_SEQUENCE.register_module() class DeformableDetrTransformerDecoder(TransformerLayerSequence): """Implements the decoder in DETR transformer. Args: return_intermediate (bool): Whether to return intermediate outputs. coder_norm_cfg (dict): Config of last normalization layer. Default: `LN`. """ def __init__(self, *args, return_intermediate=False, **kwargs): super(DeformableDetrTransformerDecoder, self).__init__(*args, **kwargs) self.return_intermediate = return_intermediate def forward(self, query, *args, reference_points=None, valid_ratios=None, reg_branches=None, **kwargs): """Forward function for `TransformerDecoder`. Args: query (Tensor): Input query with shape `(num_query, bs, embed_dims)`. reference_points (Tensor): The reference points of offset. has shape (bs, num_query, 4) when as_two_stage, otherwise has shape ((bs, num_query, 2). valid_ratios (Tensor): The radios of valid points on the feature map, has shape (bs, num_levels, 2) reg_branch: (obj:`nn.ModuleList`): Used for refining the regression results. Only would be passed when with_box_refine is True, otherwise would be passed a `None`. Returns: Tensor: Results with shape [1, num_query, bs, embed_dims] when return_intermediate is `False`, otherwise it has shape [num_layers, num_query, bs, embed_dims]. 
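# Shape walk-through for Transformer.forward above: feature maps are
# flattened to sequences before entering the encoder, and the encoder memory
# is reshaped back to a map at the end. Standalone sketch with made-up sizes.
import torch

bs, c, h, w = 2, 256, 16, 16
x = torch.randn(bs, c, h, w)
seq = x.view(bs, c, -1).permute(2, 0, 1)            # [bs, c, h, w] -> [h*w, bs, c]
assert seq.shape == (h * w, bs, c)
back = seq.permute(1, 2, 0).reshape(bs, c, h, w)    # memory -> [bs, c, h, w]
assert torch.equal(back, x)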
""" output = query intermediate = [] intermediate_reference_points = [] for lid, layer in enumerate(self.layers): if reference_points.shape[-1] == 4: reference_points_input = reference_points[:, :, None] * \ torch.cat([valid_ratios, valid_ratios], -1)[:, None] else: assert reference_points.shape[-1] == 2 reference_points_input = reference_points[:, :, None] * \ valid_ratios[:, None] output = layer( output, *args, reference_points=reference_points_input, **kwargs) output = output.permute(1, 0, 2) if reg_branches is not None: tmp = reg_branches[lid](output) if reference_points.shape[-1] == 4: new_reference_points = tmp + inverse_sigmoid( reference_points) new_reference_points = new_reference_points.sigmoid() else: assert reference_points.shape[-1] == 2 new_reference_points = tmp new_reference_points[..., :2] = tmp[ ..., :2] + inverse_sigmoid(reference_points) new_reference_points = new_reference_points.sigmoid() reference_points = new_reference_points.detach() output = output.permute(1, 0, 2) if self.return_intermediate: intermediate.append(output) intermediate_reference_points.append(reference_points) if self.return_intermediate: return torch.stack(intermediate), torch.stack( intermediate_reference_points) return output, reference_points @TRANSFORMER.register_module() class DeformableDetrTransformer(Transformer): """Implements the DeformableDETR transformer. Args: as_two_stage (bool): Generate query from encoder features. Default: False. num_feature_levels (int): Number of feature maps from FPN: Default: 4. two_stage_num_proposals (int): Number of proposals when set `as_two_stage` as True. Default: 300. """ def __init__(self, as_two_stage=False, num_feature_levels=4, two_stage_num_proposals=300, **kwargs): super(DeformableDetrTransformer, self).__init__(**kwargs) self.as_two_stage = as_two_stage self.num_feature_levels = num_feature_levels self.two_stage_num_proposals = two_stage_num_proposals self.embed_dims = self.encoder.embed_dims self.init_layers() def init_layers(self): """Initialize layers of the DeformableDetrTransformer.""" self.level_embeds = nn.Parameter( torch.Tensor(self.num_feature_levels, self.embed_dims)) if self.as_two_stage: self.enc_output = nn.Linear(self.embed_dims, self.embed_dims) self.enc_output_norm = nn.LayerNorm(self.embed_dims) self.pos_trans = nn.Linear(self.embed_dims * 2, self.embed_dims * 2) self.pos_trans_norm = nn.LayerNorm(self.embed_dims * 2) else: self.reference_points = nn.Linear(self.embed_dims, 2) def init_weights(self): """Initialize the transformer weights.""" for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) for m in self.modules(): if isinstance(m, MultiScaleDeformableAttention): m.init_weights() if not self.as_two_stage: xavier_init(self.reference_points, distribution='uniform', bias=0.) normal_(self.level_embeds) def gen_encoder_output_proposals(self, memory, memory_padding_mask, spatial_shapes): """Generate proposals from encoded memory. Args: memory (Tensor) : The output of encoder, has shape (bs, num_key, embed_dim). num_key is equal the number of points on feature map from all level. memory_padding_mask (Tensor): Padding mask for memory. has shape (bs, num_key). spatial_shapes (Tensor): The shape of all feature maps. has shape (num_level, 2). Returns: tuple: A tuple of feature map and bbox prediction. - output_memory (Tensor): The input of decoder, \ has shape (bs, num_key, embed_dim). num_key is \ equal the number of points on feature map from \ all levels. 
            - output_proposals (Tensor): The normalized proposal
              after an inverse sigmoid, has shape
              (bs, num_keys, 4).
        """

        N, S, C = memory.shape
        proposals = []
        _cur = 0
        for lvl, (H, W) in enumerate(spatial_shapes):
            mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H * W)].view(
                N, H, W, 1)
            valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
            valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)

            grid_y, grid_x = torch.meshgrid(
                torch.linspace(
                    0, H - 1, H, dtype=torch.float32, device=memory.device),
                torch.linspace(
                    0, W - 1, W, dtype=torch.float32, device=memory.device))
            grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)

            scale = torch.cat([valid_W.unsqueeze(-1),
                               valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2)
            grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale
            wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)
            proposal = torch.cat((grid, wh), -1).view(N, -1, 4)
            proposals.append(proposal)
            _cur += (H * W)
        output_proposals = torch.cat(proposals, 1)
        output_proposals_valid = ((output_proposals > 0.01) &
                                  (output_proposals < 0.99)).all(
                                      -1, keepdim=True)
        output_proposals = torch.log(output_proposals /
                                     (1 - output_proposals))
        output_proposals = output_proposals.masked_fill(
            memory_padding_mask.unsqueeze(-1), float('inf'))
        output_proposals = output_proposals.masked_fill(
            ~output_proposals_valid, float('inf'))

        output_memory = memory
        output_memory = output_memory.masked_fill(
            memory_padding_mask.unsqueeze(-1), float(0))
        output_memory = output_memory.masked_fill(~output_proposals_valid,
                                                  float(0))
        output_memory = self.enc_output_norm(self.enc_output(output_memory))
        return output_memory, output_proposals

    @staticmethod
    def get_reference_points(spatial_shapes, valid_ratios, device):
        """Get the reference points used in the decoder.

        Args:
            spatial_shapes (Tensor): The shape of all feature maps,
                has shape (num_level, 2).
            valid_ratios (Tensor): The ratios of valid points on the
                feature map, has shape (bs, num_levels, 2).
            device (obj:`device`): The device where
                reference_points should be.

        Returns:
            Tensor: reference points used in decoder, has
                shape (bs, num_keys, num_levels, 2).
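# Sketch of the proposal grid built by gen_encoder_output_proposals above:
# cell centers normalized by the valid extent, with a level-dependent size
# wh = 0.05 * 2**lvl. Standalone, one level, no padding mask (all values are
# illustrative).
import torch

N, H, W, lvl = 1, 4, 6, 0
grid_y, grid_x = torch.meshgrid(
    torch.linspace(0, H - 1, H), torch.linspace(0, W - 1, W))
grid = torch.stack([grid_x, grid_y], -1)            # (H, W, 2), pixel coords
scale = torch.tensor([W, H], dtype=torch.float32)   # fully valid feature map
centers = (grid.unsqueeze(0) + 0.5) / scale         # normalized to (0, 1)
wh = torch.ones_like(centers) * 0.05 * (2.0**lvl)
proposal = torch.cat([centers, wh], -1).view(N, -1, 4)
assert proposal.shape == (N, H * W, 4)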
""" reference_points_list = [] for lvl, (H, W) in enumerate(spatial_shapes): # TODO check this 0.5 ref_y, ref_x = torch.meshgrid( torch.linspace( 0.5, H - 0.5, H, dtype=torch.float32, device=device), torch.linspace( 0.5, W - 0.5, W, dtype=torch.float32, device=device)) ref_y = ref_y.reshape(-1)[None] / ( valid_ratios[:, None, lvl, 1] * H) ref_x = ref_x.reshape(-1)[None] / ( valid_ratios[:, None, lvl, 0] * W) ref = torch.stack((ref_x, ref_y), -1) reference_points_list.append(ref) reference_points = torch.cat(reference_points_list, 1) reference_points = reference_points[:, :, None] * valid_ratios[:, None] return reference_points def get_valid_ratio(self, mask): """Get the valid radios of feature maps of all level.""" _, H, W = mask.shape valid_H = torch.sum(~mask[:, :, 0], 1) valid_W = torch.sum(~mask[:, 0, :], 1) valid_ratio_h = valid_H.float() / H valid_ratio_w = valid_W.float() / W valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1) return valid_ratio def get_proposal_pos_embed(self, proposals, num_pos_feats=128, temperature=10000): """Get the position embedding of proposal.""" scale = 2 * math.pi dim_t = torch.arange( num_pos_feats, dtype=torch.float32, device=proposals.device) dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats) # N, L, 4 proposals = proposals.sigmoid() * scale # N, L, 4, 128 pos = proposals[:, :, :, None] / dim_t # N, L, 4, 64, 2 pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2) return pos def forward(self, mlvl_feats, mlvl_masks, query_embed, mlvl_pos_embeds, reg_branches=None, cls_branches=None, **kwargs): """Forward function for `Transformer`. Args: mlvl_feats (list(Tensor)): Input queries from different level. Each element has shape [bs, embed_dims, h, w]. mlvl_masks (list(Tensor)): The key_padding_mask from different level used for encoder and decoder, each element has shape [bs, h, w]. query_embed (Tensor): The query embedding for decoder, with shape [num_query, c]. mlvl_pos_embeds (list(Tensor)): The positional encoding of feats from different level, has the shape [bs, embed_dims, h, w]. reg_branches (obj:`nn.ModuleList`): Regression heads for feature maps from each decoder layer. Only would be passed when `with_box_refine` is True. Default to None. cls_branches (obj:`nn.ModuleList`): Classification heads for feature maps from each decoder layer. Only would be passed when `as_two_stage` is True. Default to None. Returns: tuple[Tensor]: results of decoder containing the following tensor. - inter_states: Outputs from decoder. If return_intermediate_dec is True output has shape \ (num_dec_layers, bs, num_query, embed_dims), else has \ shape (1, bs, num_query, embed_dims). - init_reference_out: The initial value of reference \ points, has shape (bs, num_queries, 4). - inter_references_out: The internal value of reference \ points in decoder, has shape \ (num_dec_layers, bs,num_query, embed_dims) - enc_outputs_class: The classification score of \ proposals generated from \ encoder's feature maps, has shape \ (batch, h*w, num_classes). \ Only would be returned when `as_two_stage` is True, \ otherwise None. - enc_outputs_coord_unact: The regression results \ generated from encoder's feature maps., has shape \ (batch, h*w, 4). Only would \ be returned when `as_two_stage` is True, \ otherwise None. 
""" assert self.as_two_stage or query_embed is not None feat_flatten = [] mask_flatten = [] lvl_pos_embed_flatten = [] spatial_shapes = [] for lvl, (feat, mask, pos_embed) in enumerate( zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): bs, c, h, w = feat.shape spatial_shape = (h, w) spatial_shapes.append(spatial_shape) feat = feat.flatten(2).transpose(1, 2) mask = mask.flatten(1) pos_embed = pos_embed.flatten(2).transpose(1, 2) lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1) lvl_pos_embed_flatten.append(lvl_pos_embed) feat_flatten.append(feat) mask_flatten.append(mask) feat_flatten = torch.cat(feat_flatten, 1) mask_flatten = torch.cat(mask_flatten, 1) lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) spatial_shapes = torch.as_tensor( spatial_shapes, dtype=torch.long, device=feat_flatten.device) level_start_index = torch.cat((spatial_shapes.new_zeros( (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) valid_ratios = torch.stack( [self.get_valid_ratio(m) for m in mlvl_masks], 1) reference_points = \ self.get_reference_points(spatial_shapes, valid_ratios, device=feat.device) feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims) lvl_pos_embed_flatten = lvl_pos_embed_flatten.permute( 1, 0, 2) # (H*W, bs, embed_dims) memory = self.encoder( query=feat_flatten, key=None, value=None, query_pos=lvl_pos_embed_flatten, query_key_padding_mask=mask_flatten, spatial_shapes=spatial_shapes, reference_points=reference_points, level_start_index=level_start_index, valid_ratios=valid_ratios, **kwargs) memory = memory.permute(1, 0, 2) bs, _, c = memory.shape if self.as_two_stage: output_memory, output_proposals = \ self.gen_encoder_output_proposals( memory, mask_flatten, spatial_shapes) enc_outputs_class = cls_branches[self.decoder.num_layers]( output_memory) enc_outputs_coord_unact = \ reg_branches[ self.decoder.num_layers](output_memory) + output_proposals topk = self.two_stage_num_proposals # We only use the first channel in enc_outputs_class as foreground, # the other (num_classes - 1) channels are actually not used. # Its targets are set to be 0s, which indicates the first # class (foreground) because we use [0, num_classes - 1] to # indicate class labels, background class is indicated by # num_classes (similar convention in RPN). # See https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/deformable_detr_head.py#L241 # noqa # This follows the official implementation of Deformable DETR. 
            topk_proposals = torch.topk(
                enc_outputs_class[..., 0], topk, dim=1)[1]
            topk_coords_unact = torch.gather(
                enc_outputs_coord_unact, 1,
                topk_proposals.unsqueeze(-1).repeat(1, 1, 4))
            topk_coords_unact = topk_coords_unact.detach()
            reference_points = topk_coords_unact.sigmoid()
            init_reference_out = reference_points
            pos_trans_out = self.pos_trans_norm(
                self.pos_trans(
                    self.get_proposal_pos_embed(topk_coords_unact)))
            query_pos, query = torch.split(pos_trans_out, c, dim=2)
        else:
            query_pos, query = torch.split(query_embed, c, dim=1)
            query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1)
            query = query.unsqueeze(0).expand(bs, -1, -1)
            reference_points = self.reference_points(query_pos).sigmoid()
            init_reference_out = reference_points

        # decoder
        query = query.permute(1, 0, 2)
        memory = memory.permute(1, 0, 2)
        query_pos = query_pos.permute(1, 0, 2)
        inter_states, inter_references = self.decoder(
            query=query,
            key=None,
            value=memory,
            query_pos=query_pos,
            key_padding_mask=mask_flatten,
            reference_points=reference_points,
            spatial_shapes=spatial_shapes,
            level_start_index=level_start_index,
            valid_ratios=valid_ratios,
            reg_branches=reg_branches,
            **kwargs)

        inter_references_out = inter_references
        if self.as_two_stage:
            return inter_states, init_reference_out,\
                inter_references_out, enc_outputs_class,\
                enc_outputs_coord_unact
        return inter_states, init_reference_out, \
            inter_references_out, None, None


@TRANSFORMER.register_module()
class DynamicConv(BaseModule):
    """Implements Dynamic Convolution.

    This module generates parameters for each sample and uses bmm to
    implement 1*1 convolution. Code is modified from the official github
    repo.

    Args:
        in_channels (int): The input feature channel.
            Defaults to 256.
        feat_channels (int): The inner feature channel.
            Defaults to 64.
        out_channels (int, optional): The output feature channel.
            When not specified, it will be set to `in_channels`
            by default.
        input_feat_shape (int): The shape of input feature.
            Defaults to 7.
        with_proj (bool): Project two-dimensional feature to
            one-dimensional feature. Default to True.
        act_cfg (dict): The activation config for DynamicConv.
        norm_cfg (dict): Config dict for normalization layer. Defaults
            to layer normalization.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
    """

    def __init__(self,
                 in_channels=256,
                 feat_channels=64,
                 out_channels=None,
                 input_feat_shape=7,
                 with_proj=True,
                 act_cfg=dict(type='ReLU', inplace=True),
                 norm_cfg=dict(type='LN'),
                 init_cfg=None):
        super(DynamicConv, self).__init__(init_cfg)
        self.in_channels = in_channels
        self.feat_channels = feat_channels
        self.out_channels_raw = out_channels
        self.input_feat_shape = input_feat_shape
        self.with_proj = with_proj
        self.act_cfg = act_cfg
        self.norm_cfg = norm_cfg
        self.out_channels = out_channels if out_channels else in_channels

        self.num_params_in = self.in_channels * self.feat_channels
        self.num_params_out = self.out_channels * self.feat_channels
        self.dynamic_layer = nn.Linear(
            self.in_channels, self.num_params_in + self.num_params_out)

        self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]
        self.norm_out = build_norm_layer(norm_cfg, self.out_channels)[1]

        self.activation = build_activation_layer(act_cfg)

        num_output = self.out_channels * input_feat_shape**2
        if self.with_proj:
            self.fc_layer = nn.Linear(num_output, self.out_channels)
            self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1]

    def forward(self, param_feature, input_feature):
        """Forward function for `DynamicConv`.
Args: param_feature (Tensor): The feature can be used to generate the parameter, has shape (num_all_proposals, in_channels). input_feature (Tensor): Feature that interact with parameters, has shape (num_all_proposals, in_channels, H, W). Returns: Tensor: The output feature has shape (num_all_proposals, out_channels). """ input_feature = input_feature.flatten(2).permute(2, 0, 1) input_feature = input_feature.permute(1, 0, 2) parameters = self.dynamic_layer(param_feature) param_in = parameters[:, :self.num_params_in].view( -1, self.in_channels, self.feat_channels) param_out = parameters[:, -self.num_params_out:].view( -1, self.feat_channels, self.out_channels) # input_feature has shape (num_all_proposals, H*W, in_channels) # param_in has shape (num_all_proposals, in_channels, feat_channels) # feature has shape (num_all_proposals, H*W, feat_channels) features = torch.bmm(input_feature, param_in) features = self.norm_in(features) features = self.activation(features) # param_out has shape (batch_size, feat_channels, out_channels) features = torch.bmm(features, param_out) features = self.norm_out(features) features = self.activation(features) if self.with_proj: features = features.flatten(1) features = self.fc_layer(features) features = self.fc_norm(features) features = self.activation(features) return features ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/utils/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .collect_env import collect_env from .compat_config import compat_cfg from .logger import get_caller_name, get_root_logger, log_img_scale from .memory import AvoidCUDAOOM, AvoidOOM from .misc import find_latest_checkpoint, update_data_root from .replace_cfg_vals import replace_cfg_vals from .setup_env import setup_multi_processes from .split_batch import split_batch from .util_distribution import build_ddp, build_dp, get_device __all__ = [ 'get_root_logger', 'collect_env', 'find_latest_checkpoint', 'update_data_root', 'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp', 'get_device', 'replace_cfg_vals', 'AvoidOOM', 'AvoidCUDAOOM' ] ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/utils/collect_env.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from mmcv.utils import collect_env as collect_base_env from mmcv.utils import get_git_hash import mmdet def collect_env(): """Collect the information of the running environments.""" env_info = collect_base_env() env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7] return env_info if __name__ == '__main__': for name, val in collect_env().items(): print(f'{name}: {val}') ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/utils/compat_config.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import copy import warnings from mmcv import ConfigDict def compat_cfg(cfg): """This function would modify some filed to keep the compatibility of config. For example, it will move some args which will be deprecated to the correct fields. 
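# The bmm-as-1x1-convolution trick used by DynamicConv.forward above,
# standalone: per-sample weight matrices are batched, so multiplying a
# flattened (L, C_in) feature map by a generated (C_in, C_feat) parameter
# block is a single torch.bmm call. All sizes below are illustrative.
import torch

num_proposals, hw, c_in, c_feat = 5, 49, 256, 64
feats = torch.randn(num_proposals, hw, c_in)         # flattened RoI features
params = torch.randn(num_proposals, c_in, c_feat)    # generated per proposal
out = torch.bmm(feats, params)                       # batched 1x1 convolution
assert out.shape == (num_proposals, hw, c_feat)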
""" cfg = copy.deepcopy(cfg) cfg = compat_imgs_per_gpu(cfg) cfg = compat_loader_args(cfg) cfg = compat_runner_args(cfg) return cfg def compat_runner_args(cfg): if 'runner' not in cfg: cfg.runner = ConfigDict({ 'type': 'EpochBasedRunner', 'max_epochs': cfg.total_epochs }) warnings.warn( 'config is now expected to have a `runner` section, ' 'please set `runner` in your config.', UserWarning) else: if 'total_epochs' in cfg: assert cfg.total_epochs == cfg.runner.max_epochs return cfg def compat_imgs_per_gpu(cfg): cfg = copy.deepcopy(cfg) if 'imgs_per_gpu' in cfg.data: warnings.warn('"imgs_per_gpu" is deprecated in MMDet V2.0. ' 'Please use "samples_per_gpu" instead') if 'samples_per_gpu' in cfg.data: warnings.warn( f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and ' f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"' f'={cfg.data.imgs_per_gpu} is used in this experiments') else: warnings.warn('Automatically set "samples_per_gpu"="imgs_per_gpu"=' f'{cfg.data.imgs_per_gpu} in this experiments') cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu return cfg def compat_loader_args(cfg): """Deprecated sample_per_gpu in cfg.data.""" cfg = copy.deepcopy(cfg) if 'train_dataloader' not in cfg.data: cfg.data['train_dataloader'] = ConfigDict() if 'val_dataloader' not in cfg.data: cfg.data['val_dataloader'] = ConfigDict() if 'test_dataloader' not in cfg.data: cfg.data['test_dataloader'] = ConfigDict() # special process for train_dataloader if 'samples_per_gpu' in cfg.data: samples_per_gpu = cfg.data.pop('samples_per_gpu') assert 'samples_per_gpu' not in \ cfg.data.train_dataloader, ('`samples_per_gpu` are set ' 'in `data` field and ` ' 'data.train_dataloader` ' 'at the same time. ' 'Please only set it in ' '`data.train_dataloader`. ') cfg.data.train_dataloader['samples_per_gpu'] = samples_per_gpu if 'persistent_workers' in cfg.data: persistent_workers = cfg.data.pop('persistent_workers') assert 'persistent_workers' not in \ cfg.data.train_dataloader, ('`persistent_workers` are set ' 'in `data` field and ` ' 'data.train_dataloader` ' 'at the same time. ' 'Please only set it in ' '`data.train_dataloader`. ') cfg.data.train_dataloader['persistent_workers'] = persistent_workers if 'workers_per_gpu' in cfg.data: workers_per_gpu = cfg.data.pop('workers_per_gpu') cfg.data.train_dataloader['workers_per_gpu'] = workers_per_gpu cfg.data.val_dataloader['workers_per_gpu'] = workers_per_gpu cfg.data.test_dataloader['workers_per_gpu'] = workers_per_gpu # special process for val_dataloader if 'samples_per_gpu' in cfg.data.val: # keep default value of `sample_per_gpu` is 1 assert 'samples_per_gpu' not in \ cfg.data.val_dataloader, ('`samples_per_gpu` are set ' 'in `data.val` field and ` ' 'data.val_dataloader` at ' 'the same time. ' 'Please only set it in ' '`data.val_dataloader`. ') cfg.data.val_dataloader['samples_per_gpu'] = \ cfg.data.val.pop('samples_per_gpu') # special process for val_dataloader # in case the test dataset is concatenated if isinstance(cfg.data.test, dict): if 'samples_per_gpu' in cfg.data.test: assert 'samples_per_gpu' not in \ cfg.data.test_dataloader, ('`samples_per_gpu` are set ' 'in `data.test` field and ` ' 'data.test_dataloader` ' 'at the same time. ' 'Please only set it in ' '`data.test_dataloader`. 
') cfg.data.test_dataloader['samples_per_gpu'] = \ cfg.data.test.pop('samples_per_gpu') elif isinstance(cfg.data.test, list): for ds_cfg in cfg.data.test: if 'samples_per_gpu' in ds_cfg: assert 'samples_per_gpu' not in \ cfg.data.test_dataloader, ('`samples_per_gpu` are set ' 'in `data.test` field and ` ' 'data.test_dataloader` at' ' the same time. ' 'Please only set it in ' '`data.test_dataloader`. ') samples_per_gpu = max( [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test]) cfg.data.test_dataloader['samples_per_gpu'] = samples_per_gpu return cfg ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/utils/contextmanagers.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import asyncio import contextlib import logging import os import time from typing import List import torch logger = logging.getLogger(__name__) DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False)) @contextlib.asynccontextmanager async def completed(trace_name='', name='', sleep_interval=0.05, streams: List[torch.cuda.Stream] = None): """Async context manager that waits for work to complete on given CUDA streams.""" if not torch.cuda.is_available(): yield return stream_before_context_switch = torch.cuda.current_stream() if not streams: streams = [stream_before_context_switch] else: streams = [s if s else stream_before_context_switch for s in streams] end_events = [ torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams ] if DEBUG_COMPLETED_TIME: start = torch.cuda.Event(enable_timing=True) stream_before_context_switch.record_event(start) cpu_start = time.monotonic() logger.debug('%s %s starting, streams: %s', trace_name, name, streams) grad_enabled_before = torch.is_grad_enabled() try: yield finally: current_stream = torch.cuda.current_stream() assert current_stream == stream_before_context_switch if DEBUG_COMPLETED_TIME: cpu_end = time.monotonic() for i, stream in enumerate(streams): event = end_events[i] stream.record_event(event) grad_enabled_after = torch.is_grad_enabled() # observed change of torch.is_grad_enabled() during concurrent run of # async_test_bboxes code assert (grad_enabled_before == grad_enabled_after ), 'Unexpected is_grad_enabled() value change' are_done = [e.query() for e in end_events] logger.debug('%s %s completed: %s streams: %s', trace_name, name, are_done, streams) with torch.cuda.stream(stream_before_context_switch): while not all(are_done): await asyncio.sleep(sleep_interval) are_done = [e.query() for e in end_events] logger.debug( '%s %s completed: %s streams: %s', trace_name, name, are_done, streams, ) current_stream = torch.cuda.current_stream() assert current_stream == stream_before_context_switch if DEBUG_COMPLETED_TIME: cpu_time = (cpu_end - cpu_start) * 1000 stream_times_ms = '' for i, stream in enumerate(streams): elapsed_time = start.elapsed_time(end_events[i]) stream_times_ms += f' {stream} {elapsed_time:.2f} ms' logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time, stream_times_ms) @contextlib.asynccontextmanager async def concurrent(streamqueue: asyncio.Queue, trace_name='concurrent', name='stream'): """Run code concurrently in different streams. :param streamqueue: asyncio.Queue instance. Queue tasks define the pool of streams used for concurrent execution. 
""" if not torch.cuda.is_available(): yield return initial_stream = torch.cuda.current_stream() with torch.cuda.stream(initial_stream): stream = await streamqueue.get() assert isinstance(stream, torch.cuda.Stream) try: with torch.cuda.stream(stream): logger.debug('%s %s is starting, stream: %s', trace_name, name, stream) yield current = torch.cuda.current_stream() assert current == stream logger.debug('%s %s has finished, stream: %s', trace_name, name, stream) finally: streamqueue.task_done() streamqueue.put_nowait(stream) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/utils/logger.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import inspect import logging from mmcv.utils import get_logger def get_root_logger(log_file=None, log_level=logging.INFO): """Get root logger. Args: log_file (str, optional): File path of log. Defaults to None. log_level (int, optional): The level of logger. Defaults to logging.INFO. Returns: :obj:`logging.Logger`: The obtained logger """ logger = get_logger(name='mmdet', log_file=log_file, log_level=log_level) return logger def get_caller_name(): """Get name of caller method.""" # this_func_frame = inspect.stack()[0][0] # i.e., get_caller_name # callee_frame = inspect.stack()[1][0] # e.g., log_img_scale caller_frame = inspect.stack()[2][0] # e.g., caller of log_img_scale caller_method = caller_frame.f_code.co_name try: caller_class = caller_frame.f_locals['self'].__class__.__name__ return f'{caller_class}.{caller_method}' except KeyError: # caller is a function return caller_method def log_img_scale(img_scale, shape_order='hw', skip_square=False): """Log image size. Args: img_scale (tuple): Image size to be logged. shape_order (str, optional): The order of image shape. 'hw' for (height, width) and 'wh' for (width, height). Defaults to 'hw'. skip_square (bool, optional): Whether to skip logging for square img_scale. Defaults to False. Returns: bool: Whether to have done logging. """ if shape_order == 'hw': height, width = img_scale elif shape_order == 'wh': width, height = img_scale else: raise ValueError(f'Invalid shape_order {shape_order}.') if skip_square and (height == width): return False logger = get_root_logger() caller = get_caller_name() logger.info(f'image shape: height={height}, width={width} in {caller}') return True ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/utils/memory.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import warnings from collections import abc from contextlib import contextmanager from functools import wraps import torch from mmdet.utils import get_root_logger def cast_tensor_type(inputs, src_type=None, dst_type=None): """Recursively convert Tensor in inputs from ``src_type`` to ``dst_type``. Args: inputs: Inputs that to be casted. src_type (torch.dtype | torch.device): Source type. src_type (torch.dtype | torch.device): Destination type. Returns: The same type with inputs, but all contained Tensors have been cast. 
""" assert dst_type is not None if isinstance(inputs, torch.Tensor): if isinstance(dst_type, torch.device): # convert Tensor to dst_device if hasattr(inputs, 'to') and \ hasattr(inputs, 'device') and \ (inputs.device == src_type or src_type is None): return inputs.to(dst_type) else: return inputs else: # convert Tensor to dst_dtype if hasattr(inputs, 'to') and \ hasattr(inputs, 'dtype') and \ (inputs.dtype == src_type or src_type is None): return inputs.to(dst_type) else: return inputs # we need to ensure that the type of inputs to be casted are the same # as the argument `src_type`. elif isinstance(inputs, abc.Mapping): return type(inputs)({ k: cast_tensor_type(v, src_type=src_type, dst_type=dst_type) for k, v in inputs.items() }) elif isinstance(inputs, abc.Iterable): return type(inputs)( cast_tensor_type(item, src_type=src_type, dst_type=dst_type) for item in inputs) # TODO: Currently not supported # elif isinstance(inputs, InstanceData): # for key, value in inputs.items(): # inputs[key] = cast_tensor_type( # value, src_type=src_type, dst_type=dst_type) # return inputs else: return inputs @contextmanager def _ignore_torch_cuda_oom(): """A context which ignores CUDA OOM exception from pytorch. Code is modified from # noqa: E501 """ try: yield except RuntimeError as e: # NOTE: the string may change? if 'CUDA out of memory. ' in str(e): pass else: raise class AvoidOOM: """Try to convert inputs to FP16 and CPU if got a PyTorch's CUDA Out of Memory error. It will do the following steps: 1. First retry after calling `torch.cuda.empty_cache()`. 2. If that still fails, it will then retry by converting inputs to FP16. 3. If that still fails trying to convert inputs to CPUs. In this case, it expects the function to dispatch to CPU implementation. Args: to_cpu (bool): Whether to convert outputs to CPU if get an OOM error. This will slow down the code significantly. Defaults to True. test (bool): Skip `_ignore_torch_cuda_oom` operate that can use lightweight data in unit test, only used in test unit. Defaults to False. Examples: >>> from mmdet.utils.memory import AvoidOOM >>> AvoidCUDAOOM = AvoidOOM() >>> output = AvoidOOM.retry_if_cuda_oom( >>> some_torch_function)(input1, input2) >>> # To use as a decorator >>> # from mmdet.utils import AvoidCUDAOOM >>> @AvoidCUDAOOM.retry_if_cuda_oom >>> def function(*args, **kwargs): >>> return None ``` Note: 1. The output may be on CPU even if inputs are on GPU. Processing on CPU will slow down the code significantly. 2. When converting inputs to CPU, it will only look at each argument and check if it has `.device` and `.to` for conversion. Nested structures of tensors are not supported. 3. Since the function might be called more than once, it has to be stateless. """ def __init__(self, to_cpu=True, test=False): self.to_cpu = to_cpu self.test = test def retry_if_cuda_oom(self, func): """Makes a function retry itself after encountering pytorch's CUDA OOM error. The implementation logic is referred to https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/memory.py Args: func: a stateless callable that takes tensor-like objects as arguments. Returns: func: a callable which retries `func` if OOM is encountered. 
""" # noqa: W605 @wraps(func) def wrapped(*args, **kwargs): # raw function if not self.test: with _ignore_torch_cuda_oom(): return func(*args, **kwargs) # Clear cache and retry torch.cuda.empty_cache() with _ignore_torch_cuda_oom(): return func(*args, **kwargs) # get the type and device of first tensor dtype, device = None, None values = args + tuple(kwargs.values()) for value in values: if isinstance(value, torch.Tensor): dtype = value.dtype device = value.device break if dtype is None or device is None: raise ValueError('There is no tensor in the inputs, ' 'cannot get dtype and device.') # Convert to FP16 fp16_args = cast_tensor_type(args, dst_type=torch.half) fp16_kwargs = cast_tensor_type(kwargs, dst_type=torch.half) logger = get_root_logger() logger.warning(f'Attempting to copy inputs of {str(func)} ' 'to FP16 due to CUDA OOM') # get input tensor type, the output type will same as # the first parameter type. with _ignore_torch_cuda_oom(): output = func(*fp16_args, **fp16_kwargs) output = cast_tensor_type( output, src_type=torch.half, dst_type=dtype) if not self.test: return output logger.warning('Using FP16 still meet CUDA OOM') # Try on CPU. This will slow down the code significantly, # therefore print a notice. if self.to_cpu: logger.warning(f'Attempting to copy inputs of {str(func)} ' 'to CPU due to CUDA OOM') cpu_device = torch.empty(0).device cpu_args = cast_tensor_type(args, dst_type=cpu_device) cpu_kwargs = cast_tensor_type(kwargs, dst_type=cpu_device) # convert outputs to GPU with _ignore_torch_cuda_oom(): logger.warning(f'Convert outputs to GPU (device={device})') output = func(*cpu_args, **cpu_kwargs) output = cast_tensor_type( output, src_type=cpu_device, dst_type=device) return output warnings.warn('Cannot convert output to GPU due to CUDA OOM, ' 'the output is now on CPU, which might cause ' 'errors if the output need to interact with GPU ' 'data in subsequent operations') logger.warning('Cannot convert output to GPU due to ' 'CUDA OOM, the output is on CPU now.') return func(*cpu_args, **cpu_kwargs) else: # may still get CUDA OOM error return func(*args, **kwargs) return wrapped # To use AvoidOOM as a decorator AvoidCUDAOOM = AvoidOOM() ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/utils/misc.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import glob import os import os.path as osp import warnings import mmcv import torch from mmcv.utils import TORCH_VERSION, digit_version, print_log def find_latest_checkpoint(path, suffix='pth'): """Find the latest checkpoint from the working directory. Args: path(str): The path to find checkpoints. suffix(str): File extension. Defaults to pth. Returns: latest_path(str | None): File path of the latest checkpoint. References: .. [1] https://github.com/microsoft/SoftTeacher /blob/main/ssod/utils/patch.py """ if not osp.exists(path): warnings.warn('The path of checkpoints does not exist.') return None if osp.exists(osp.join(path, f'latest.{suffix}')): return osp.join(path, f'latest.{suffix}') checkpoints = glob.glob(osp.join(path, f'*.{suffix}')) if len(checkpoints) == 0: warnings.warn('There are no checkpoints in the path.') return None latest = -1 latest_path = None for checkpoint in checkpoints: count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0]) if count > latest: latest = count latest_path = checkpoint return latest_path def update_data_root(cfg, logger=None): """Update data root according to env MMDET_DATASETS. 
If set env MMDET_DATASETS, update cfg.data_root according to MMDET_DATASETS. Otherwise, using cfg.data_root as default. Args: cfg (mmcv.Config): The model config need to modify logger (logging.Logger | str | None): the way to print msg """ assert isinstance(cfg, mmcv.Config), \ f'cfg got wrong type: {type(cfg)}, expected mmcv.Config' if 'MMDET_DATASETS' in os.environ: dst_root = os.environ['MMDET_DATASETS'] print_log(f'MMDET_DATASETS has been set to be {dst_root}.' f'Using {dst_root} as data root.') else: return assert isinstance(cfg, mmcv.Config), \ f'cfg got wrong type: {type(cfg)}, expected mmcv.Config' def update(cfg, src_str, dst_str): for k, v in cfg.items(): if isinstance(v, mmcv.ConfigDict): update(cfg[k], src_str, dst_str) if isinstance(v, str) and src_str in v: cfg[k] = v.replace(src_str, dst_str) update(cfg.data, cfg.data_root, dst_root) cfg.data_root = dst_root _torch_version_div_indexing = ( 'parrots' not in TORCH_VERSION and digit_version(TORCH_VERSION) >= digit_version('1.8')) def floordiv(dividend, divisor, rounding_mode='trunc'): if _torch_version_div_indexing: return torch.div(dividend, divisor, rounding_mode=rounding_mode) else: return dividend // divisor ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/utils/profiling.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import contextlib import sys import time import torch if sys.version_info >= (3, 7): @contextlib.contextmanager def profile_time(trace_name, name, enabled=True, stream=None, end_stream=None): """Print time spent by CPU and GPU. Useful as a temporary context manager to find sweet spots of code suitable for async implementation. """ if (not enabled) or not torch.cuda.is_available(): yield return stream = stream if stream else torch.cuda.current_stream() end_stream = end_stream if end_stream else stream start = torch.cuda.Event(enable_timing=True) end = torch.cuda.Event(enable_timing=True) stream.record_event(start) try: cpu_start = time.monotonic() yield finally: cpu_end = time.monotonic() end_stream.record_event(end) end.synchronize() cpu_time = (cpu_end - cpu_start) * 1000 gpu_time = start.elapsed_time(end) msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms ' msg += f'gpu_time {gpu_time:.2f} ms stream {stream}' print(msg, end_stream) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/utils/replace_cfg_vals.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import re from mmcv.utils import Config def replace_cfg_vals(ori_cfg): """Replace the string "${key}" with the corresponding value. Replace the "${key}" with the value of ori_cfg.key in the config. And support replacing the chained ${key}. Such as, replace "${key0.key1}" with the value of cfg.key0.key1. Code is modified from `vars.py < https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/vars.py>`_ # noqa: E501 Args: ori_cfg (mmcv.utils.config.Config): The origin config with "${key}" generated from a file. Returns: updated_cfg [mmcv.utils.config.Config]: The config with "${key}" replaced by the corresponding value. 
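# floordiv above papers over the torch >= 1.8 change where integer division
# on tensors moved to torch.div(..., rounding_mode=...). A standalone check
# of the truncating behavior that branch selects:
import torch

a, b = torch.tensor([7, -7]), torch.tensor([2, 2])
assert torch.equal(torch.div(a, b, rounding_mode='trunc'),
                   torch.tensor([3, -3]))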
""" def get_value(cfg, key): for k in key.split('.'): cfg = cfg[k] return cfg def replace_value(cfg): if isinstance(cfg, dict): return {key: replace_value(value) for key, value in cfg.items()} elif isinstance(cfg, list): return [replace_value(item) for item in cfg] elif isinstance(cfg, tuple): return tuple([replace_value(item) for item in cfg]) elif isinstance(cfg, str): # the format of string cfg may be: # 1) "${key}", which will be replaced with cfg.key directly # 2) "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx", # which will be replaced with the string of the cfg.key keys = pattern_key.findall(cfg) values = [get_value(ori_cfg, key[2:-1]) for key in keys] if len(keys) == 1 and keys[0] == cfg: # the format of string cfg is "${key}" cfg = values[0] else: for key, value in zip(keys, values): # the format of string cfg is # "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx" assert not isinstance(value, (dict, list, tuple)), \ f'for the format of string cfg is ' \ f"'xxxxx${key}xxxxx' or 'xxx${key}xxx${key}xxx', " \ f"the type of the value of '${key}' " \ f'can not be dict, list, or tuple' \ f'but you input {type(value)} in {cfg}' cfg = cfg.replace(key, str(value)) return cfg else: return cfg # the pattern of string "${key}" pattern_key = re.compile(r'\$\{[a-zA-Z\d_.]*\}') # the type of ori_cfg._cfg_dict is mmcv.utils.config.ConfigDict updated_cfg = Config( replace_value(ori_cfg._cfg_dict), filename=ori_cfg.filename) # replace the model with model_wrapper if updated_cfg.get('model_wrapper', None) is not None: updated_cfg.model = updated_cfg.model_wrapper updated_cfg.pop('model_wrapper') return updated_cfg ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/utils/setup_env.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import os import platform import warnings import cv2 import torch.multiprocessing as mp def setup_multi_processes(cfg): """Setup multi-processing environment variables.""" # set multi-process start method as `fork` to speed up the training if platform.system() != 'Windows': mp_start_method = cfg.get('mp_start_method', 'fork') current_method = mp.get_start_method(allow_none=True) if current_method is not None and current_method != mp_start_method: warnings.warn( f'Multi-processing start method `{mp_start_method}` is ' f'different from the previous setting `{current_method}`.' f'It will be force set to `{mp_start_method}`. 
You can change ' f'this behavior by changing `mp_start_method` in your config.') mp.set_start_method(mp_start_method, force=True) # disable opencv multithreading to avoid system being overloaded opencv_num_threads = cfg.get('opencv_num_threads', 0) cv2.setNumThreads(opencv_num_threads) # setup OMP threads # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa workers_per_gpu = cfg.data.get('workers_per_gpu', 1) if 'train_dataloader' in cfg.data: workers_per_gpu = \ max(cfg.data.train_dataloader.get('workers_per_gpu', 1), workers_per_gpu) if 'OMP_NUM_THREADS' not in os.environ and workers_per_gpu > 1: omp_num_threads = 1 warnings.warn( f'Setting OMP_NUM_THREADS environment variable for each process ' f'to be {omp_num_threads} in default, to avoid your system being ' f'overloaded, please further tune the variable for optimal ' f'performance in your application as needed.') os.environ['OMP_NUM_THREADS'] = str(omp_num_threads) # setup MKL threads if 'MKL_NUM_THREADS' not in os.environ and workers_per_gpu > 1: mkl_num_threads = 1 warnings.warn( f'Setting MKL_NUM_THREADS environment variable for each process ' f'to be {mkl_num_threads} in default, to avoid your system being ' f'overloaded, please further tune the variable for optimal ' f'performance in your application as needed.') os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/utils/split_batch.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch def split_batch(img, img_metas, kwargs): """Split data_batch by tags. Code is modified from # noqa: E501 Args: img (Tensor): of shape (N, C, H, W) encoding input images. Typically these should be mean centered and std scaled. img_metas (list[dict]): List of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys, see :class:`mmdet.datasets.pipelines.Collect`. kwargs (dict): Specific to concrete implementation. Returns: data_groups (dict): a dict that data_batch splited by tags, such as 'sup', 'unsup_teacher', and 'unsup_student'. """ # only stack img in the batch def fuse_list(obj_list, obj): return torch.stack(obj_list) if isinstance(obj, torch.Tensor) else obj_list # select data with tag from data_batch def select_group(data_batch, current_tag): group_flag = [tag == current_tag for tag in data_batch['tag']] return { k: fuse_list([vv for vv, gf in zip(v, group_flag) if gf], v) for k, v in data_batch.items() } kwargs.update({'img': img, 'img_metas': img_metas}) kwargs.update({'tag': [meta['tag'] for meta in img_metas]}) tags = list(set(kwargs['tag'])) data_groups = {tag: select_group(kwargs, tag) for tag in tags} for tag, group in data_groups.items(): group.pop('tag') return data_groups ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/utils/util_distribution.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.parallel import MMDataParallel, MMDistributedDataParallel dp_factory = {'cuda': MMDataParallel, 'cpu': MMDataParallel} ddp_factory = {'cuda': MMDistributedDataParallel} def build_dp(model, device='cuda', dim=0, *args, **kwargs): """build DataParallel module by device type. 
if device is cuda, return a MMDataParallel model; if device is mlu, return a MLUDataParallel model. Args: model (:class:`nn.Module`): model to be parallelized. device (str): device type, cuda, cpu or mlu. Defaults to cuda. dim (int): Dimension used to scatter the data. Defaults to 0. Returns: nn.Module: the model to be parallelized. """ if device == 'npu': from mmcv.device.npu import NPUDataParallel dp_factory['npu'] = NPUDataParallel torch.npu.set_device(kwargs['device_ids'][0]) torch.npu.set_compile_mode(jit_compile=False) model = model.npu() elif device == 'cuda': model = model.cuda(kwargs['device_ids'][0]) elif device == 'mlu': from mmcv.device.mlu import MLUDataParallel dp_factory['mlu'] = MLUDataParallel model = model.mlu() return dp_factory[device](model, dim=dim, *args, **kwargs) def build_ddp(model, device='cuda', *args, **kwargs): """Build DistributedDataParallel module by device type. If device is cuda, return a MMDistributedDataParallel model; if device is mlu, return a MLUDistributedDataParallel model. Args: model (:class:`nn.Module`): module to be parallelized. device (str): device type, mlu or cuda. Returns: :class:`nn.Module`: the module to be parallelized References: .. [1] https://pytorch.org/docs/stable/generated/torch.nn.parallel. DistributedDataParallel.html """ assert device in ['cuda', 'mlu', 'npu'], 'Only available for cuda or mlu or npu devices.' if device == 'npu': from mmcv.device.npu import NPUDistributedDataParallel torch.npu.set_compile_mode(jit_compile=False) ddp_factory['npu'] = NPUDistributedDataParallel model = model.npu() elif device == 'cuda': model = model.cuda() elif device == 'mlu': from mmcv.device.mlu import MLUDistributedDataParallel ddp_factory['mlu'] = MLUDistributedDataParallel model = model.mlu() return ddp_factory[device](model, *args, **kwargs) def is_npu_available(): """Returns a bool indicating if NPU is currently available.""" return hasattr(torch, 'npu') and torch.npu.is_available() def is_mlu_available(): """Returns a bool indicating if MLU is currently available.""" return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available() def get_device(): """Returns an available device, cpu, cuda or mlu.""" is_device_available = { 'npu': is_npu_available(), 'cuda': torch.cuda.is_available(), 'mlu': is_mlu_available() } device_list = [k for k, v in is_device_available.items() if v] return device_list[0] if len(device_list) >= 1 else 'cpu' ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/utils/util_mixins.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. """This module defines the :class:`NiceRepr` mixin class, which defines a ``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__`` method, which you must define. This means you only have to overload one function instead of two. Furthermore, if the object defines a ``__len__`` method, then the ``__nice__`` method defaults to something sensible, otherwise it is treated as abstract and raises ``NotImplementedError``. To use simply have your object inherit from :class:`NiceRepr` (multi-inheritance should be ok). This code was copied from the ubelt library: https://github.com/Erotemic/ubelt Example: >>> # Objects that define __nice__ have a default __str__ and __repr__ >>> class Student(NiceRepr): ... def __init__(self, name): ... self.name = name ... def __nice__(self): ... 
return self.name >>> s1 = Student('Alice') >>> s2 = Student('Bob') >>> print(f's1 = {s1}') >>> print(f's2 = {s2}') s1 = <Student(Alice)> s2 = <Student(Bob)> Example: >>> # Objects that define __len__ have a default __nice__ >>> class Group(NiceRepr): ... def __init__(self, data): ... self.data = data ... def __len__(self): ... return len(self.data) >>> g = Group([1, 2, 3]) >>> print(f'g = {g}') g = <Group(3)> """ import warnings class NiceRepr: """Inherit from this class and define ``__nice__`` to "nicely" print your objects. Defines ``__str__`` and ``__repr__`` in terms of the ``__nice__`` function. Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``. If the inheriting class has a ``__len__`` method, then the default ``__nice__`` method will return its length. Example: >>> class Foo(NiceRepr): ... def __nice__(self): ... return 'info' >>> foo = Foo() >>> assert str(foo) == '<Foo(info)>' >>> assert repr(foo).startswith('<Foo(info) at ') Example: >>> class Bar(NiceRepr): ... pass >>> bar = Bar() >>> import pytest >>> with pytest.warns(None) as record: >>> assert 'object at' in str(bar) >>> assert 'object at' in repr(bar) Example: >>> class Baz(NiceRepr): ... def __len__(self): ... return 5 >>> baz = Baz() >>> assert str(baz) == '<Baz(5)>' """ def __nice__(self): """str: a "nice" summary string describing this module""" if hasattr(self, '__len__'): # It is a common pattern for objects to use __len__ in __nice__ # As a convenience we define a default __nice__ for these objects return str(len(self)) else: # In all other cases force the subclass to overload __nice__ raise NotImplementedError( f'Define the __nice__ method for {self.__class__!r}') def __repr__(self): """str: the string of the module""" try: nice = self.__nice__() classname = self.__class__.__name__ return f'<{classname}({nice}) at {hex(id(self))}>' except NotImplementedError as ex: warnings.warn(str(ex), category=RuntimeWarning) return object.__repr__(self) def __str__(self): """str: the string of the module""" try: classname = self.__class__.__name__ nice = self.__nice__() return f'<{classname}({nice})>' except NotImplementedError as ex: warnings.warn(str(ex), category=RuntimeWarning) return object.__repr__(self) ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/utils/util_random.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. """Helpers for random number generators.""" import numpy as np def ensure_rng(rng=None): """Coerces input into a random number generator. If the input is None, then a global random state is returned. If the input is a numeric value, then that is used as a seed to construct a random state. Otherwise the input is returned as-is. Adapted from [1]_. Args: rng (int | numpy.random.RandomState | None): if None, then defaults to the global rng. Otherwise this can be an integer or a RandomState class Returns: (numpy.random.RandomState) : rng - a numpy random number generator References: .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501 """ if rng is None: rng = np.random.mtrand._rand elif isinstance(rng, int): rng = np.random.RandomState(rng) else: rng = rng return rng ================================================ FILE: DLTA_AI_app/mmdetection/mmdet/version.py ================================================ # Copyright (c) OpenMMLab. All rights reserved.
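# A minimal sanity sketch (editorial, with a hypothetical helper name) of
# what parse_version_info, defined just below, is expected to return,
# derived from its own code: digit components become ints, and an 'rc'
# component is split into its numeric part plus a trailing 'rcN' string.
def _sketch_parse_version_info():
    assert parse_version_info('2.26.0') == (2, 26, 0)
    assert parse_version_info('2.25.0rc1') == (2, 25, 0, 'rc1')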
__version__ = '2.26.0' short_version = __version__ def parse_version_info(version_str): version_info = [] for x in version_str.split('.'): if x.isdigit(): version_info.append(int(x)) elif x.find('rc') != -1: patch_version = x.split('rc') version_info.append(int(patch_version[0])) version_info.append(f'rc{patch_version[1]}') return tuple(version_info) version_info = parse_version_info(__version__) ================================================ FILE: DLTA_AI_app/mmdetection/model-index.yml ================================================ Import: - configs/atss/metafile.yml - configs/autoassign/metafile.yml - configs/carafe/metafile.yml - configs/cascade_rcnn/metafile.yml - configs/cascade_rpn/metafile.yml - configs/centernet/metafile.yml - configs/centripetalnet/metafile.yml - configs/cornernet/metafile.yml - configs/convnext/metafile.yml - configs/dcn/metafile.yml - configs/dcnv2/metafile.yml - configs/deformable_detr/metafile.yml - configs/detectors/metafile.yml - configs/detr/metafile.yml - configs/double_heads/metafile.yml - configs/dyhead/metafile.yml - configs/dynamic_rcnn/metafile.yml - configs/efficientnet/metafile.yml - configs/empirical_attention/metafile.yml - configs/faster_rcnn/metafile.yml - configs/fcos/metafile.yml - configs/foveabox/metafile.yml - configs/fpg/metafile.yml - configs/free_anchor/metafile.yml - configs/fsaf/metafile.yml - configs/gcnet/metafile.yml - configs/gfl/metafile.yml - configs/ghm/metafile.yml - configs/gn/metafile.yml - configs/gn+ws/metafile.yml - configs/grid_rcnn/metafile.yml - configs/groie/metafile.yml - configs/guided_anchoring/metafile.yml - configs/hrnet/metafile.yml - configs/htc/metafile.yml - configs/instaboost/metafile.yml - configs/lad/metafile.yml - configs/ld/metafile.yml - configs/libra_rcnn/metafile.yml - configs/mask_rcnn/metafile.yml - configs/ms_rcnn/metafile.yml - configs/nas_fcos/metafile.yml - configs/nas_fpn/metafile.yml - configs/openimages/metafile.yml - configs/paa/metafile.yml - configs/pafpn/metafile.yml - configs/panoptic_fpn/metafile.yml - configs/pvt/metafile.yml - configs/pisa/metafile.yml - configs/point_rend/metafile.yml - configs/queryinst/metafile.yml - configs/regnet/metafile.yml - configs/reppoints/metafile.yml - configs/res2net/metafile.yml - configs/resnest/metafile.yml - configs/retinanet/metafile.yml - configs/sabl/metafile.yml - configs/scnet/metafile.yml - configs/scratch/metafile.yml - configs/seesaw_loss/metafile.yml - configs/sparse_rcnn/metafile.yml - configs/solo/metafile.yml - configs/ssd/metafile.yml - configs/swin/metafile.yml - configs/tridentnet/metafile.yml - configs/tood/metafile.yml - configs/vfnet/metafile.yml - configs/yolact/metafile.yml - configs/yolo/metafile.yml - configs/yolof/metafile.yml - configs/yolox/metafile.yml ================================================ FILE: DLTA_AI_app/mmdetection/pytest.ini ================================================ [pytest] addopts = --xdoctest --xdoctest-style=auto norecursedirs = .git ignore build __pycache__ data docker docs .eggs filterwarnings= default ignore:.*No cfgstr given in Cacher constructor or call.*:Warning ignore:.*Define the __nice__ method for.*:Warning ================================================ FILE: DLTA_AI_app/mmdetection/requirements/albu.txt ================================================ albumentations>=0.3.2 --no-binary qudida,albumentations ================================================ FILE: DLTA_AI_app/mmdetection/requirements/build.txt ================================================ # These must be installed 
before building mmdetection cython numpy ================================================ FILE: DLTA_AI_app/mmdetection/requirements/docs.txt ================================================ docutils==0.16.0 markdown>=3.4.0 myst-parser -e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme sphinx==5.3.0 sphinx-copybutton sphinx_markdown_tables>=0.0.17 sphinx_rtd_theme ================================================ FILE: DLTA_AI_app/mmdetection/requirements/mminstall.txt ================================================ mmcv-full>=1.3.17 ================================================ FILE: DLTA_AI_app/mmdetection/requirements/optional.txt ================================================ cityscapesscripts imagecorruptions sklearn ================================================ FILE: DLTA_AI_app/mmdetection/requirements/readthedocs.txt ================================================ mmcv torch torchvision ================================================ FILE: DLTA_AI_app/mmdetection/requirements/runtime.txt ================================================ matplotlib numpy pycocotools scipy six terminaltables ================================================ FILE: DLTA_AI_app/mmdetection/requirements/tests.txt ================================================ asynctest codecov flake8 interrogate isort==4.3.21 # Note: used for kwarray.group_items, this may be ported to mmcv in the future. kwarray -e git+https://github.com/open-mmlab/mmtracking#egg=mmtrack onnx==1.7.0 onnxruntime>=1.8.0 protobuf<=3.20.1 pytest ubelt xdoctest>=0.10.0 yapf ================================================ FILE: DLTA_AI_app/mmdetection/requirements.txt ================================================ -r requirements/build.txt -r requirements/optional.txt -r requirements/runtime.txt -r requirements/tests.txt ================================================ FILE: DLTA_AI_app/mmdetection/setup.cfg ================================================ [isort] line_length = 79 multi_line_output = 0 extra_standard_library = setuptools known_first_party = mmdet known_third_party = PIL,asynctest,cityscapesscripts,cv2,gather_models,matplotlib,mmcv,numpy,onnx,onnxruntime,pycocotools,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,six,terminaltables,torch,ts,yaml no_lines_before = STDLIB,LOCALFOLDER default_section = THIRDPARTY [yapf] BASED_ON_STYLE = pep8 BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true # ignore-words-list needs to be lowercase format. For example, if we want to # ignore word "BA", then we need to append "ba" to ignore-words-list rather # than "BA" [codespell] skip = *.ipynb quiet-level = 3 ignore-words-list = patten,nd,ty,mot,hist,formating,winn,gool,datas,wan,confids,TOOD,tood,ba,warmup,nam ================================================ FILE: DLTA_AI_app/mmdetection/setup.py ================================================ #!/usr/bin/env python # Copyright (c) OpenMMLab. All rights reserved. 
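# A minimal usage sketch (editorial; the helper name, temp file, and its
# contents are illustrative only) of what parse_requirements, defined
# below, yields: version pins are kept with the default with_version=True
# and stripped with with_version=False.
def _sketch_parse_requirements():
    import os.path as osp
    import tempfile
    with tempfile.TemporaryDirectory() as tmpdir:
        fname = osp.join(tmpdir, 'reqs.txt')
        with open(fname, 'w') as f:
            f.write('numpy\nmmcv-full>=1.3.17\n# a comment\n')
        # comments are skipped; pins kept by default ...
        assert parse_requirements(fname) == ['numpy', 'mmcv-full>=1.3.17']
        # ... and stripped when with_version=False
        assert parse_requirements(fname, with_version=False) == [
            'numpy', 'mmcv-full'
        ]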
import os import os.path as osp import platform import shutil import sys import warnings from setuptools import find_packages, setup import torch from torch.utils.cpp_extension import (BuildExtension, CppExtension, CUDAExtension) def readme(): with open('README.md', encoding='utf-8') as f: content = f.read() return content version_file = 'mmdet/version.py' def get_version(): with open(version_file, 'r') as f: exec(compile(f.read(), version_file, 'exec')) return locals()['__version__'] def make_cuda_ext(name, module, sources, sources_cuda=[]): define_macros = [] extra_compile_args = {'cxx': []} if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1': define_macros += [('WITH_CUDA', None)] extension = CUDAExtension extra_compile_args['nvcc'] = [ '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__', ] sources += sources_cuda else: print(f'Compiling {name} without CUDA') extension = CppExtension return extension( name=f'{module}.{name}', sources=[os.path.join(*module.split('.'), p) for p in sources], define_macros=define_macros, extra_compile_args=extra_compile_args) def parse_requirements(fname='requirements.txt', with_version=True): """Parse the package dependencies listed in a requirements file but strips specific versioning information. Args: fname (str): path to requirements file with_version (bool, default=False): if True include version specs Returns: List[str]: list of requirements items CommandLine: python -c "import setup; print(setup.parse_requirements())" """ import re import sys from os.path import exists require_fpath = fname def parse_line(line): """Parse information from a line in a requirements text file.""" if line.startswith('-r '): # Allow specifying requirements in other files target = line.split(' ')[1] for info in parse_require_file(target): yield info else: info = {'line': line} if line.startswith('-e '): info['package'] = line.split('#egg=')[1] elif '@git+' in line: info['package'] = line else: # Remove versioning from the package pat = '(' + '|'.join(['>=', '==', '>']) + ')' parts = re.split(pat, line, maxsplit=1) parts = [p.strip() for p in parts] info['package'] = parts[0] if len(parts) > 1: op, rest = parts[1:] if ';' in rest: # Handle platform specific dependencies # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies version, platform_deps = map(str.strip, rest.split(';')) info['platform_deps'] = platform_deps else: version = rest # NOQA info['version'] = (op, version) yield info def parse_require_file(fpath): with open(fpath, 'r') as f: for line in f.readlines(): line = line.strip() if line and not line.startswith('#'): for info in parse_line(line): yield info def gen_packages_items(): if exists(require_fpath): for info in parse_require_file(require_fpath): parts = [info['package']] if with_version and 'version' in info: parts.extend(info['version']) if not sys.version.startswith('3.4'): # apparently package_deps are broken in 3.4 platform_deps = info.get('platform_deps') if platform_deps is not None: parts.append(';' + platform_deps) item = ''.join(parts) yield item packages = list(gen_packages_items()) return packages def add_mim_extension(): """Add extra files that are required to support MIM into the package. These files will be added by creating a symlink to the originals if the package is installed in `editable` mode (e.g. pip install -e .), or by copying from the originals otherwise. 
""" # parse installment mode if 'develop' in sys.argv: # installed by `pip install -e .` if platform.system() == 'Windows': # set `copy` mode here since symlink fails on Windows. mode = 'copy' else: mode = 'symlink' elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv: # installed by `pip install .` # or create source distribution by `python setup.py sdist` mode = 'copy' else: return filenames = ['tools', 'configs', 'demo', 'model-index.yml'] repo_path = osp.dirname(__file__) mim_path = osp.join(repo_path, 'mmdet', '.mim') os.makedirs(mim_path, exist_ok=True) for filename in filenames: if osp.exists(filename): src_path = osp.join(repo_path, filename) tar_path = osp.join(mim_path, filename) if osp.isfile(tar_path) or osp.islink(tar_path): os.remove(tar_path) elif osp.isdir(tar_path): shutil.rmtree(tar_path) if mode == 'symlink': src_relpath = osp.relpath(src_path, osp.dirname(tar_path)) os.symlink(src_relpath, tar_path) elif mode == 'copy': if osp.isfile(src_path): shutil.copyfile(src_path, tar_path) elif osp.isdir(src_path): shutil.copytree(src_path, tar_path) else: warnings.warn(f'Cannot copy file {src_path}.') else: raise ValueError(f'Invalid mode {mode}') if __name__ == '__main__': add_mim_extension() setup( name='mmdet', version=get_version(), description='OpenMMLab Detection Toolbox and Benchmark', long_description=readme(), long_description_content_type='text/markdown', author='MMDetection Contributors', author_email='openmmlab@gmail.com', keywords='computer vision, object detection', url='https://github.com/open-mmlab/mmdetection', packages=find_packages(exclude=('configs', 'tools', 'demo')), include_package_data=True, classifiers=[ 'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', ], license='Apache License 2.0', install_requires=parse_requirements('requirements/runtime.txt'), extras_require={ 'all': parse_requirements('requirements.txt'), 'tests': parse_requirements('requirements/tests.txt'), 'build': parse_requirements('requirements/build.txt'), 'optional': parse_requirements('requirements/optional.txt'), 'mim': parse_requirements('requirements/mminstall.txt'), }, ext_modules=[], cmdclass={'build_ext': BuildExtension}, zip_safe=False) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_datasets/test_coco_dataset.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
import os.path as osp import tempfile import mmcv import pytest from mmdet.datasets import CocoDataset def _create_ids_error_coco_json(json_name): image = { 'id': 0, 'width': 640, 'height': 640, 'file_name': 'fake_name.jpg', } annotation_1 = { 'id': 1, 'image_id': 0, 'category_id': 0, 'area': 400, 'bbox': [50, 60, 20, 20], 'iscrowd': 0, } annotation_2 = { 'id': 1, 'image_id': 0, 'category_id': 0, 'area': 900, 'bbox': [100, 120, 30, 30], 'iscrowd': 0, } categories = [{ 'id': 0, 'name': 'car', 'supercategory': 'car', }] fake_json = { 'images': [image], 'annotations': [annotation_1, annotation_2], 'categories': categories } mmcv.dump(fake_json, json_name) def test_coco_annotation_ids_unique(): tmp_dir = tempfile.TemporaryDirectory() fake_json_file = osp.join(tmp_dir.name, 'fake_data.json') _create_ids_error_coco_json(fake_json_file) # test annotation ids not unique error with pytest.raises(AssertionError): CocoDataset(ann_file=fake_json_file, classes=('car', ), pipeline=[]) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_datasets/test_common.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import copy import logging import os.path as osp import tempfile from unittest.mock import MagicMock, patch import mmcv import numpy as np import pytest import torch import torch.nn as nn from mmcv.runner import EpochBasedRunner from torch.utils.data import DataLoader from mmdet.core.evaluation import DistEvalHook, EvalHook from mmdet.datasets import DATASETS, CocoDataset, CustomDataset, build_dataset def _create_dummy_coco_json(json_name): image = { 'id': 0, 'width': 640, 'height': 640, 'file_name': 'fake_name.jpg', } annotation_1 = { 'id': 1, 'image_id': 0, 'category_id': 0, 'area': 400, 'bbox': [50, 60, 20, 20], 'iscrowd': 0, } annotation_2 = { 'id': 2, 'image_id': 0, 'category_id': 0, 'area': 900, 'bbox': [100, 120, 30, 30], 'iscrowd': 0, } annotation_3 = { 'id': 3, 'image_id': 0, 'category_id': 0, 'area': 1600, 'bbox': [150, 160, 40, 40], 'iscrowd': 0, } annotation_4 = { 'id': 4, 'image_id': 0, 'category_id': 0, 'area': 10000, 'bbox': [250, 260, 100, 100], 'iscrowd': 0, } categories = [{ 'id': 0, 'name': 'car', 'supercategory': 'car', }] fake_json = { 'images': [image], 'annotations': [annotation_1, annotation_2, annotation_3, annotation_4], 'categories': categories } mmcv.dump(fake_json, json_name) def _create_dummy_custom_pkl(pkl_name): fake_pkl = [{ 'filename': 'fake_name.jpg', 'width': 640, 'height': 640, 'ann': { 'bboxes': np.array([[50, 60, 70, 80], [100, 120, 130, 150], [150, 160, 190, 200], [250, 260, 350, 360]]), 'labels': np.array([0, 0, 0, 0]) } }] mmcv.dump(fake_pkl, pkl_name) def _create_dummy_results(): boxes = [ np.array([[50, 60, 70, 80, 1.0], [100, 120, 130, 150, 0.98], [150, 160, 190, 200, 0.96], [250, 260, 350, 360, 0.95]]) ] return [boxes] @pytest.mark.parametrize('config_path', ['./configs/_base_/datasets/voc0712.py']) def test_dataset_init(config_path, monkeypatch): data_config = mmcv.Config.fromfile(config_path) if 'data' not in data_config: return monkeypatch.chdir('./tests/') # to use ./tests/data stage_names = ['train', 'val', 'test'] for stage_name in stage_names: dataset_config = copy.deepcopy(data_config.data.get(stage_name)) dataset = build_dataset(dataset_config) dataset[0] def test_dataset_evaluation(): tmp_dir = tempfile.TemporaryDirectory() # create dummy data fake_json_file = osp.join(tmp_dir.name, 'fake_data.json') _create_dummy_coco_json(fake_json_file) # test 
single coco dataset evaluation coco_dataset = CocoDataset( ann_file=fake_json_file, classes=('car', ), pipeline=[]) fake_results = _create_dummy_results() eval_results = coco_dataset.evaluate(fake_results, classwise=True) assert eval_results['bbox_mAP'] == 1 assert eval_results['bbox_mAP_50'] == 1 assert eval_results['bbox_mAP_75'] == 1 # test concat dataset evaluation fake_concat_results = _create_dummy_results() + _create_dummy_results() # build concat dataset through two config dict coco_cfg = dict( type='CocoDataset', ann_file=fake_json_file, classes=('car', ), pipeline=[]) concat_cfgs = [coco_cfg, coco_cfg] concat_dataset = build_dataset(concat_cfgs) eval_results = concat_dataset.evaluate(fake_concat_results) assert eval_results['0_bbox_mAP'] == 1 assert eval_results['0_bbox_mAP_50'] == 1 assert eval_results['0_bbox_mAP_75'] == 1 assert eval_results['1_bbox_mAP'] == 1 assert eval_results['1_bbox_mAP_50'] == 1 assert eval_results['1_bbox_mAP_75'] == 1 # build concat dataset through concatenated ann_file coco_cfg = dict( type='CocoDataset', ann_file=[fake_json_file, fake_json_file], classes=('car', ), pipeline=[]) concat_dataset = build_dataset(coco_cfg) eval_results = concat_dataset.evaluate(fake_concat_results) assert eval_results['0_bbox_mAP'] == 1 assert eval_results['0_bbox_mAP_50'] == 1 assert eval_results['0_bbox_mAP_75'] == 1 assert eval_results['1_bbox_mAP'] == 1 assert eval_results['1_bbox_mAP_50'] == 1 assert eval_results['1_bbox_mAP_75'] == 1 # create dummy data fake_pkl_file = osp.join(tmp_dir.name, 'fake_data.pkl') _create_dummy_custom_pkl(fake_pkl_file) # test single custom dataset evaluation custom_dataset = CustomDataset( ann_file=fake_pkl_file, classes=('car', ), pipeline=[]) fake_results = _create_dummy_results() eval_results = custom_dataset.evaluate(fake_results) assert eval_results['mAP'] == 1 # test concat dataset evaluation fake_concat_results = _create_dummy_results() + _create_dummy_results() # build concat dataset through two config dict custom_cfg = dict( type='CustomDataset', ann_file=fake_pkl_file, classes=('car', ), pipeline=[]) concat_cfgs = [custom_cfg, custom_cfg] concat_dataset = build_dataset(concat_cfgs) eval_results = concat_dataset.evaluate(fake_concat_results) assert eval_results['0_mAP'] == 1 assert eval_results['1_mAP'] == 1 # build concat dataset through concatenated ann_file concat_cfg = dict( type='CustomDataset', ann_file=[fake_pkl_file, fake_pkl_file], classes=('car', ), pipeline=[]) concat_dataset = build_dataset(concat_cfg) eval_results = concat_dataset.evaluate(fake_concat_results) assert eval_results['0_mAP'] == 1 assert eval_results['1_mAP'] == 1 # build concat dataset through explicit type concat_cfg = dict( type='ConcatDataset', datasets=[custom_cfg, custom_cfg], separate_eval=False) concat_dataset = build_dataset(concat_cfg) eval_results = concat_dataset.evaluate(fake_concat_results, metric='mAP') assert eval_results['mAP'] == 1 assert len(concat_dataset.datasets[0].data_infos) == \ len(concat_dataset.datasets[1].data_infos) assert len(concat_dataset.datasets[0].data_infos) == 1 tmp_dir.cleanup() @patch('mmdet.apis.single_gpu_test', MagicMock) @patch('mmdet.apis.multi_gpu_test', MagicMock) @pytest.mark.parametrize('EvalHookParam', (EvalHook, DistEvalHook)) def test_evaluation_hook(EvalHookParam): # create dummy data dataloader = DataLoader(torch.ones((5, 2))) # 0.1. dataloader is not a DataLoader object with pytest.raises(TypeError): EvalHookParam(dataloader=MagicMock(), interval=-1) # 0.2. 
negative interval with pytest.raises(ValueError): EvalHookParam(dataloader, interval=-1) # 1. start=None, interval=1: perform evaluation after each epoch. runner = _build_demo_runner() evalhook = EvalHookParam(dataloader, interval=1) evalhook.evaluate = MagicMock() runner.register_hook(evalhook) runner.run([dataloader], [('train', 1)], 2) assert evalhook.evaluate.call_count == 2 # after epoch 1 & 2 # 2. start=1, interval=1: perform evaluation after each epoch. runner = _build_demo_runner() evalhook = EvalHookParam(dataloader, start=1, interval=1) evalhook.evaluate = MagicMock() runner.register_hook(evalhook) runner.run([dataloader], [('train', 1)], 2) assert evalhook.evaluate.call_count == 2 # after epoch 1 & 2 # 3. start=None, interval=2: perform evaluation after epoch 2, 4, 6, etc runner = _build_demo_runner() evalhook = EvalHookParam(dataloader, interval=2) evalhook.evaluate = MagicMock() runner.register_hook(evalhook) runner.run([dataloader], [('train', 1)], 2) assert evalhook.evaluate.call_count == 1 # after epoch 2 # 4. start=1, interval=2: perform evaluation after epoch 1, 3, 5, etc runner = _build_demo_runner() evalhook = EvalHookParam(dataloader, start=1, interval=2) evalhook.evaluate = MagicMock() runner.register_hook(evalhook) runner.run([dataloader], [('train', 1)], 3) assert evalhook.evaluate.call_count == 2 # after epoch 1 & 3 # 5. start=0/negative, interval=1: perform evaluation after each epoch and # before epoch 1. runner = _build_demo_runner() evalhook = EvalHookParam(dataloader, start=0) evalhook.evaluate = MagicMock() runner.register_hook(evalhook) runner.run([dataloader], [('train', 1)], 2) assert evalhook.evaluate.call_count == 3 # before epoch1 and after e1 & e2 # 6. start=0, interval=2, dynamic_intervals=[(3, 1)]: the evaluation # interval is 2 when it is less than 3 epoch, otherwise it is 1. runner = _build_demo_runner() evalhook = EvalHookParam( dataloader, start=0, interval=2, dynamic_intervals=[(3, 1)]) evalhook.evaluate = MagicMock() runner.register_hook(evalhook) runner.run([dataloader], [('train', 1)], 4) assert evalhook.evaluate.call_count == 3 # the evaluation start epoch cannot be less than 0 runner = _build_demo_runner() with pytest.raises(ValueError): EvalHookParam(dataloader, start=-2) evalhook = EvalHookParam(dataloader, start=0) evalhook.evaluate = MagicMock() runner.register_hook(evalhook) runner.run([dataloader], [('train', 1)], 2) assert evalhook.evaluate.call_count == 3 # before epoch1 and after e1 & e2 # 6. resuming from epoch i, start = x (x<=i), interval =1: perform # evaluation after each epoch and before the first epoch. runner = _build_demo_runner() evalhook = EvalHookParam(dataloader, start=1) evalhook.evaluate = MagicMock() runner.register_hook(evalhook) runner._epoch = 2 runner.run([dataloader], [('train', 1)], 3) assert evalhook.evaluate.call_count == 2 # before & after epoch 3 # 7. resuming from epoch i, start = i+1/None, interval =1: perform # evaluation after each epoch. 
runner = _build_demo_runner() evalhook = EvalHookParam(dataloader, start=2) evalhook.evaluate = MagicMock() runner.register_hook(evalhook) runner._epoch = 1 runner.run([dataloader], [('train', 1)], 3) assert evalhook.evaluate.call_count == 2 # after epoch 2 & 3 def _build_demo_runner(): class Model(nn.Module): def __init__(self): super().__init__() self.linear = nn.Linear(2, 1) def forward(self, x): return self.linear(x) def train_step(self, x, optimizer, **kwargs): return dict(loss=self(x)) def val_step(self, x, optimizer, **kwargs): return dict(loss=self(x)) model = Model() tmp_dir = tempfile.mkdtemp() runner = EpochBasedRunner( model=model, work_dir=tmp_dir, logger=logging.getLogger()) return runner @pytest.mark.parametrize('classes, expected_length', [(['bus'], 2), (['car'], 1), (['bus', 'car'], 2)]) def test_allow_empty_images(classes, expected_length): dataset_class = DATASETS.get('CocoDataset') # Filter empty images filtered_dataset = dataset_class( ann_file='tests/data/coco_sample.json', img_prefix='tests/data', pipeline=[], classes=classes, filter_empty_gt=True) # Get all full_dataset = dataset_class( ann_file='tests/data/coco_sample.json', img_prefix='tests/data', pipeline=[], classes=classes, filter_empty_gt=False) assert len(filtered_dataset) == expected_length assert len(filtered_dataset.img_ids) == expected_length assert len(full_dataset) == 3 assert len(full_dataset.img_ids) == 3 assert filtered_dataset.CLASSES == classes assert full_dataset.CLASSES == classes ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_datasets/test_custom_dataset.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import unittest from unittest.mock import MagicMock, patch import pytest from mmdet.datasets import DATASETS @patch('mmdet.datasets.CocoDataset.load_annotations', MagicMock()) @patch('mmdet.datasets.CustomDataset.load_annotations', MagicMock()) @patch('mmdet.datasets.XMLDataset.load_annotations', MagicMock()) @patch('mmdet.datasets.CityscapesDataset.load_annotations', MagicMock()) @patch('mmdet.datasets.CocoDataset._filter_imgs', MagicMock) @patch('mmdet.datasets.CustomDataset._filter_imgs', MagicMock) @patch('mmdet.datasets.XMLDataset._filter_imgs', MagicMock) @patch('mmdet.datasets.CityscapesDataset._filter_imgs', MagicMock) @pytest.mark.parametrize('dataset', ['CocoDataset', 'VOCDataset', 'CityscapesDataset']) def test_custom_classes_override_default(dataset): dataset_class = DATASETS.get(dataset) if dataset in ['CocoDataset', 'CityscapesDataset']: dataset_class.coco = MagicMock() dataset_class.cat_ids = MagicMock() original_classes = dataset_class.CLASSES # Test setting classes as a tuple custom_dataset = dataset_class( ann_file=MagicMock(), pipeline=[], classes=('bus', 'car'), test_mode=True, img_prefix='VOC2007' if dataset == 'VOCDataset' else '') assert custom_dataset.CLASSES != original_classes assert custom_dataset.CLASSES == ('bus', 'car') print(custom_dataset) # Test setting classes as a list custom_dataset = dataset_class( ann_file=MagicMock(), pipeline=[], classes=['bus', 'car'], test_mode=True, img_prefix='VOC2007' if dataset == 'VOCDataset' else '') assert custom_dataset.CLASSES != original_classes assert custom_dataset.CLASSES == ['bus', 'car'] print(custom_dataset) # Test overriding not a subset custom_dataset = dataset_class( ann_file=MagicMock(), pipeline=[], classes=['foo'], test_mode=True, img_prefix='VOC2007' if dataset == 'VOCDataset' else '') assert 
custom_dataset.CLASSES != original_classes assert custom_dataset.CLASSES == ['foo'] print(custom_dataset) # Test default behavior custom_dataset = dataset_class( ann_file=MagicMock(), pipeline=[], classes=None, test_mode=True, img_prefix='VOC2007' if dataset == 'VOCDataset' else '') assert custom_dataset.CLASSES == original_classes print(custom_dataset) # Test sending file path import tempfile with tempfile.TemporaryDirectory() as tmpdir: path = tmpdir + 'classes.txt' with open(path, 'w') as f: f.write('bus\ncar\n') custom_dataset = dataset_class( ann_file=MagicMock(), pipeline=[], classes=path, test_mode=True, img_prefix='VOC2007' if dataset == 'VOCDataset' else '') assert custom_dataset.CLASSES != original_classes assert custom_dataset.CLASSES == ['bus', 'car'] print(custom_dataset) class CustomDatasetTests(unittest.TestCase): def setUp(self): super().setUp() self.data_dir = osp.join( osp.dirname(osp.dirname(osp.dirname(__file__))), 'data') self.dataset_class = DATASETS.get('XMLDataset') def test_data_infos__default_db_directories(self): """Test correct data reading with a Pascal VOC directory structure.""" test_dataset_root = osp.join(self.data_dir, 'VOCdevkit', 'VOC2007') custom_ds = self.dataset_class( data_root=test_dataset_root, ann_file=osp.join(test_dataset_root, 'ImageSets', 'Main', 'trainval.txt'), pipeline=[], classes=('person', 'dog'), test_mode=True) self.assertListEqual([{ 'id': '000001', 'filename': osp.join('JPEGImages', '000001.jpg'), 'width': 353, 'height': 500 }], custom_ds.data_infos) def test_data_infos__overridden_db_subdirectories(self): """Test correct data reading with a customized directory structure.""" test_dataset_root = osp.join(self.data_dir, 'custom_dataset') custom_ds = self.dataset_class( data_root=test_dataset_root, ann_file=osp.join(test_dataset_root, 'trainval.txt'), pipeline=[], classes=('person', 'dog'), test_mode=True, img_prefix='', img_subdir='images', ann_subdir='images') self.assertListEqual([{ 'id': '000001', 'filename': osp.join('images', '000001.jpg'), 'width': 353, 'height': 500 }], custom_ds.data_infos) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_datasets/test_dataset_wrapper.py ================================================ # Copyright (c) OpenMMLab. All rights reserved.
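# A minimal sketch (editorial, hypothetical helper name; the numbers are
# illustrative, not taken from the test) of the class-balanced
# repeat-factor rule exercised in this file: per category,
# r(c) = max(1, sqrt(thr / freq(c))), and an image is repeated
# ceil(max r(c)) over the categories it contains.
def _sketch_repeat_factor():
    import math
    repeat_thr = 0.5
    cat_freq = {0: 0.9, 1: 0.1}  # fraction of images containing each category
    category_repeat = {
        cat: max(1.0, math.sqrt(repeat_thr / freq))
        for cat, freq in cat_freq.items()
    }
    # an image containing categories {0, 1}: sqrt(0.5 / 0.1) ~= 2.24 -> 3
    assert math.ceil(max(category_repeat.values())) == 3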
import bisect import math from collections import defaultdict from unittest.mock import MagicMock import numpy as np import pytest from mmdet.datasets import (ClassBalancedDataset, ConcatDataset, CustomDataset, MultiImageMixDataset, RepeatDataset) def test_dataset_wrapper(): CustomDataset.load_annotations = MagicMock() CustomDataset.__getitem__ = MagicMock(side_effect=lambda idx: idx) dataset_a = CustomDataset( ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='') len_a = 10 cat_ids_list_a = [ np.random.randint(0, 80, num).tolist() for num in np.random.randint(1, 20, len_a) ] ann_info_list_a = [] for _ in range(len_a): height = np.random.randint(10, 30) weight = np.random.randint(10, 30) img = np.ones((height, weight, 3)) gt_bbox = np.concatenate([ np.random.randint(1, 5, (2, 2)), np.random.randint(1, 5, (2, 2)) + 5 ], axis=1) gt_labels = np.random.randint(0, 80, 2) ann_info_list_a.append( dict(gt_bboxes=gt_bbox, gt_labels=gt_labels, img=img)) dataset_a.data_infos = MagicMock() dataset_a.data_infos.__len__.return_value = len_a dataset_a.get_cat_ids = MagicMock( side_effect=lambda idx: cat_ids_list_a[idx]) dataset_a.get_ann_info = MagicMock( side_effect=lambda idx: ann_info_list_a[idx]) dataset_b = CustomDataset( ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='') len_b = 20 cat_ids_list_b = [ np.random.randint(0, 80, num).tolist() for num in np.random.randint(1, 20, len_b) ] ann_info_list_b = [] for _ in range(len_b): height = np.random.randint(10, 30) weight = np.random.randint(10, 30) img = np.ones((height, weight, 3)) gt_bbox = np.concatenate([ np.random.randint(1, 5, (2, 2)), np.random.randint(1, 5, (2, 2)) + 5 ], axis=1) gt_labels = np.random.randint(0, 80, 2) ann_info_list_b.append( dict(gt_bboxes=gt_bbox, gt_labels=gt_labels, img=img)) dataset_b.data_infos = MagicMock() dataset_b.data_infos.__len__.return_value = len_b dataset_b.get_cat_ids = MagicMock( side_effect=lambda idx: cat_ids_list_b[idx]) dataset_b.get_ann_info = MagicMock( side_effect=lambda idx: ann_info_list_b[idx]) concat_dataset = ConcatDataset([dataset_a, dataset_b]) assert concat_dataset[5] == 5 assert concat_dataset[25] == 15 assert concat_dataset.get_cat_ids(5) == cat_ids_list_a[5] assert concat_dataset.get_cat_ids(25) == cat_ids_list_b[15] assert concat_dataset.get_ann_info(5) == ann_info_list_a[5] assert concat_dataset.get_ann_info(25) == ann_info_list_b[15] assert len(concat_dataset) == len(dataset_a) + len(dataset_b) # Test if ConcatDataset allows dataset classes without the PALETTE # attribute palette_backup = CustomDataset.PALETTE delattr(CustomDataset, 'PALETTE') concat_dataset = ConcatDataset([dataset_a, dataset_b]) assert concat_dataset.PALETTE is None CustomDataset.PALETTE = palette_backup repeat_dataset = RepeatDataset(dataset_a, 10) assert repeat_dataset[5] == 5 assert repeat_dataset[15] == 5 assert repeat_dataset[27] == 7 assert repeat_dataset.get_cat_ids(5) == cat_ids_list_a[5] assert repeat_dataset.get_cat_ids(15) == cat_ids_list_a[5] assert repeat_dataset.get_cat_ids(27) == cat_ids_list_a[7] assert repeat_dataset.get_ann_info(5) == ann_info_list_a[5] assert repeat_dataset.get_ann_info(15) == ann_info_list_a[5] assert repeat_dataset.get_ann_info(27) == ann_info_list_a[7] assert len(repeat_dataset) == 10 * len(dataset_a) # Test if RepeatDataset allows dataset classes without the PALETTE # attribute delattr(CustomDataset, 'PALETTE') repeat_dataset = RepeatDataset(dataset_a, 10) assert repeat_dataset.PALETTE is None CustomDataset.PALETTE = palette_backup category_freq = 
defaultdict(int) for cat_ids in cat_ids_list_a: cat_ids = set(cat_ids) for cat_id in cat_ids: category_freq[cat_id] += 1 for k, v in category_freq.items(): category_freq[k] = v / len(cat_ids_list_a) mean_freq = np.mean(list(category_freq.values())) repeat_thr = mean_freq category_repeat = { cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq)) for cat_id, cat_freq in category_freq.items() } repeat_factors = [] for cat_ids in cat_ids_list_a: cat_ids = set(cat_ids) repeat_factor = max({category_repeat[cat_id] for cat_id in cat_ids}) repeat_factors.append(math.ceil(repeat_factor)) repeat_factors_cumsum = np.cumsum(repeat_factors) repeat_factor_dataset = ClassBalancedDataset(dataset_a, repeat_thr) assert len(repeat_factor_dataset) == repeat_factors_cumsum[-1] for idx in np.random.randint(0, len(repeat_factor_dataset), 3): assert repeat_factor_dataset[idx] == bisect.bisect_right( repeat_factors_cumsum, idx) assert repeat_factor_dataset.get_ann_info(idx) == ann_info_list_a[ bisect.bisect_right(repeat_factors_cumsum, idx)] # Test if ClassBalancedDataset allows dataset classes without the PALETTE # attribute delattr(CustomDataset, 'PALETTE') repeat_factor_dataset = ClassBalancedDataset(dataset_a, repeat_thr) assert repeat_factor_dataset.PALETTE is None CustomDataset.PALETTE = palette_backup img_scale = (60, 60) pipeline = [ dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), dict( type='RandomAffine', scaling_ratio_range=(0.1, 2), border=(-img_scale[0] // 2, -img_scale[1] // 2)), dict( type='MixUp', img_scale=img_scale, ratio_range=(0.8, 1.6), pad_val=114.0), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Resize', img_scale=img_scale, keep_ratio=True), dict(type='Pad', pad_to_square=True, pad_val=114.0), ] CustomDataset.load_annotations = MagicMock() results = [] for _ in range(2): height = np.random.randint(10, 30) weight = np.random.randint(10, 30) img = np.ones((height, weight, 3)) gt_bbox = np.concatenate([ np.random.randint(1, 5, (2, 2)), np.random.randint(1, 5, (2, 2)) + 5 ], axis=1) gt_labels = np.random.randint(0, 80, 2) results.append(dict(gt_bboxes=gt_bbox, gt_labels=gt_labels, img=img)) CustomDataset.__getitem__ = MagicMock(side_effect=lambda idx: results[idx]) dataset_a = CustomDataset( ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='') len_a = 2 cat_ids_list_a = [ np.random.randint(0, 80, num).tolist() for num in np.random.randint(1, 20, len_a) ] dataset_a.data_infos = MagicMock() dataset_a.data_infos.__len__.return_value = len_a dataset_a.get_cat_ids = MagicMock( side_effect=lambda idx: cat_ids_list_a[idx]) # test dynamic_scale deprecated with pytest.raises(RuntimeError): MultiImageMixDataset(dataset_a, pipeline, (80, 80)) multi_image_mix_dataset = MultiImageMixDataset(dataset_a, pipeline) for idx in range(len_a): results_ = multi_image_mix_dataset[idx] assert results_['img'].shape == (img_scale[0], img_scale[1], 3) # test skip_type_keys multi_image_mix_dataset = MultiImageMixDataset( dataset_a, pipeline, skip_type_keys=('MixUp', 'RandomFlip', 'Resize', 'Pad')) for idx in range(len_a): results_ = multi_image_mix_dataset[idx] assert results_['img'].shape == (img_scale[0], img_scale[1], 3) # Test if MultiImageMixDataset allows dataset classes without the PALETTE # attribute delattr(CustomDataset, 'PALETTE') multi_image_mix_dataset = MultiImageMixDataset(dataset_a, pipeline) assert multi_image_mix_dataset.PALETTE is None CustomDataset.PALETTE = palette_backup ================================================ FILE: 
DLTA_AI_app/mmdetection/tests/test_data/test_datasets/test_openimages_dataset.py ================================================ import csv import os.path as osp import tempfile import mmcv import numpy as np import pytest from mmdet.datasets import OpenImagesChallengeDataset, OpenImagesDataset def _create_ids_error_oid_csv( label_file, fake_csv_file, ): label_description = ['/m/000002', 'Football'] # `newline=''` is used to avoid index error of out of bounds # in Windows system with open(label_file, 'w', newline='') as f: f_csv = csv.writer(f) f_csv.writerow(label_description) header = [ 'ImageID', 'Source', 'LabelName', 'Confidence', 'XMin', 'XMax', 'YMin', 'YMax', 'IsOccluded', 'IsTruncated', 'IsGroupOf', 'IsDepiction', 'IsInside' ] annotations = [[ 'color', 'xclick', '/m/000002', '1', '0.022673031', '0.9642005', '0.07103825', '0.80054647', '0', '0', '0', '0', '0' ], [ '000595fe6fee6369', 'xclick', '/m/000000', '1', '0', '1', '0', '1', '0', '0', '1', '0', '0' ]] # `newline=''` is used to avoid index error of out of bounds # in Windows system with open(fake_csv_file, 'w', newline='') as f: f_csv = csv.writer(f) f_csv.writerow(header) f_csv.writerows(annotations) def _create_oid_style_ann(label_file, csv_file, label_level_file): label_description = [['/m/000000', 'Sports equipment'], ['/m/000001', 'Ball'], ['/m/000002', 'Football'], ['/m/000004', 'Bicycle']] with open(label_file, 'w', newline='') as f: f_csv = csv.writer(f) f_csv.writerows(label_description) header = [ 'ImageID', 'Source', 'LabelName', 'Confidence', 'XMin', 'XMax', 'YMin', 'YMax', 'IsOccluded', 'IsTruncated', 'IsGroupOf', 'IsDepiction', 'IsInside' ] annotations = [ [ 'color', 'xclick', '/m/000002', 1, 0.0333333, 0.1, 0.0333333, 0.1, 0, 0, 1, 0, 0 ], [ 'color', 'xclick', '/m/000002', 1, 0.1, 0.166667, 0.1, 0.166667, 0, 0, 0, 0, 0 ], ] # `newline=''` is used to avoid index error of out of bounds # in Windows system with open(csv_file, 'w', newline='') as f: f_csv = csv.writer(f) f_csv.writerow(header) f_csv.writerows(annotations) header = ['ImageID', 'Source', 'LabelName', 'Confidence'] annotations = [['color', 'xclick', '/m/000002', '1'], ['color', 'xclick', '/m/000004', '0']] # `newline=''` is used to avoid index error of out of bounds # in Windows system with open(label_level_file, 'w', newline='') as f: f_csv = csv.writer(f) f_csv.writerow(header) f_csv.writerows(annotations) def _create_hierarchy_json(hierarchy_name): fake_hierarchy = \ {'LabelName': '/m/0bl9f', # entity label 'Subcategory': [ { 'LabelName': '/m/000000', 'Subcategory': [ {'LabelName': '/m/000001', 'Subcategory': [ { 'LabelName': '/m/000002' } ] }, { 'LabelName': '/m/000004' } ] } ] } mmcv.dump(fake_hierarchy, hierarchy_name) def _create_hierarchy_np(hierarchy_name): fake_hierarchy = np.array([[0, 1, 0, 0, 0], [0, 1, 1, 0, 0], [0, 1, 1, 1, 0], [0, 1, 0, 0, 1], [0, 0, 0, 0, 0]]) with open(hierarchy_name, 'wb') as f: np.save(f, fake_hierarchy) def _create_dummy_results(): boxes = [ np.zeros((0, 5)), np.zeros((0, 5)), np.array([[10, 10, 15, 15, 1.0], [15, 15, 30, 30, 0.98], [10, 10, 25, 25, 0.98], [28, 28, 35, 35, 0.97], [30, 30, 51, 51, 0.96], [100, 110, 120, 130, 0.15]]), np.array([[30, 30, 50, 50, 0.51]]), ] return [boxes] def _creat_oid_challenge_style_ann(txt_file, label_file, label_level_file): bboxes = [ 'validation/color.jpg\n', '4 29\n', '2\n', '1 0.0333333 0.1 0.0333333 0.1 1\n', '1 0.1 0.166667 0.1 0.166667 0\n', ] # `newline=''` is used to avoid index error of out of bounds # in Windows system with open(txt_file, 'w', newline='') as f: 
f.writelines(bboxes) f.close() label_description = [['/m/000000', 'Sports equipment', 1], ['/m/000001', 'Ball', 2], ['/m/000002', 'Football', 3], ['/m/000004', 'Bicycle', 4]] # `newline=''` is used to avoid index error of out of bounds # in Windows system with open(label_file, 'w', newline='') as f: f_csv = csv.writer(f) f_csv.writerows(label_description) header = ['ImageID', 'LabelName', 'Confidence'] annotations = [['color', '/m/000001', '1'], ['color', '/m/000000', '0']] # `newline=''` is used to avoid index error of out of bounds # in Windows system with open(label_level_file, 'w', newline='') as f: f_csv = csv.writer(f) f_csv.writerow(header) f_csv.writerows(annotations) def _create_metas(meta_file): fake_meta = [{ 'filename': 'data/OpenImages/OpenImages/validation/color.jpg', 'ori_shape': (300, 300, 3) }] mmcv.dump(fake_meta, meta_file) def test_oid_annotation_ids_unique(): # create fake ann files tmp_dir = tempfile.TemporaryDirectory() fake_label_file = osp.join(tmp_dir.name, 'fake_label.csv') fake_ann_file = osp.join(tmp_dir.name, 'fake_ann.csv') _create_ids_error_oid_csv(fake_label_file, fake_ann_file) # test annotation ids not unique error with pytest.raises(AssertionError): OpenImagesDataset( ann_file=fake_ann_file, label_file=fake_label_file, pipeline=[]) tmp_dir.cleanup() def test_openimages_dataset(): # create fake ann files tmp_dir = tempfile.TemporaryDirectory() label_file = osp.join(tmp_dir.name, 'label_file.csv') ann_file = osp.join(tmp_dir.name, 'ann_file.csv') label_level_file = osp.join(tmp_dir.name, 'label_level_file.csv') _create_oid_style_ann(label_file, ann_file, label_level_file) hierarchy_json = osp.join(tmp_dir.name, 'hierarchy.json') _create_hierarchy_json(hierarchy_json) # test whether hierarchy_file is not None when set # get_parent_classes is True with pytest.raises(AssertionError): OpenImagesDataset( ann_file=ann_file, label_file=label_file, image_level_ann_file=label_level_file, pipeline=[]) dataset = OpenImagesDataset( ann_file=ann_file, label_file=label_file, image_level_ann_file=label_level_file, hierarchy_file=hierarchy_json, pipeline=[]) ann = dataset.get_ann_info(0) # two legal detection bboxes with `group_of` parameter assert ann['bboxes'].shape[0] == ann['labels'].shape[0] == \ ann['gt_is_group_ofs'].shape[0] == 2 # test load metas from pipeline img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(128, 128), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] dataset = OpenImagesDataset( ann_file=ann_file, img_prefix='tests/data', label_file=label_file, image_level_ann_file=label_level_file, load_from_file=False, hierarchy_file=hierarchy_json, pipeline=test_pipeline) dataset.prepare_test_img(0) assert len(dataset.test_img_metas) == 1 result = _create_dummy_results() dataset.evaluate(result) # test get hierarchy for classes hierarchy_json = osp.join(tmp_dir.name, 'hierarchy.json') _create_hierarchy_json(hierarchy_json) # test with hierarchy file wrong suffix with pytest.raises(AssertionError): fake_path = osp.join(tmp_dir.name, 'hierarchy.csv') OpenImagesDataset( ann_file=ann_file, img_prefix='tests/data', label_file=label_file, image_level_ann_file=label_level_file, load_from_file=False, hierarchy_file=fake_path, 
pipeline=test_pipeline) # test that the hierarchy file loads successfully hierarchy = dataset.get_relation_matrix(hierarchy_json) hierarchy_gt = np.array([[1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0], [1, 0, 0, 1]]) assert np.equal(hierarchy, hierarchy_gt).all() # test evaluation # create fake metas meta_file = osp.join(tmp_dir.name, 'meta.pkl') _create_metas(meta_file) dataset = OpenImagesDataset( ann_file=ann_file, label_file=label_file, image_level_ann_file=label_level_file, hierarchy_file=hierarchy_json, meta_file=meta_file, pipeline=[]) # test evaluation using group_of, adding father classes to # GT and annotations, and considering image-level labels, # In the first label (Sports equipment): tp = [0, 1, 0, 0, 1], # fp = [1, 0, 1, 1, 0] # In the second label (Ball), tp = [0, 1, 0, 1], fp = [1, 0, 1, 0]. # In the third label (Football), tp = [0, 1, 0, 1], fp = [1, 0, 1, 0]. # In the fourth label (Bicycle), tp = [0], fp = [1]. result = _create_dummy_results() parsed_results = dataset.evaluate(result) assert np.isclose(parsed_results['mAP'], 0.8333, 1e-4) dataset = OpenImagesDataset( ann_file=ann_file, label_file=label_file, load_image_level_labels=False, image_level_ann_file=label_level_file, hierarchy_file=hierarchy_json, meta_file=meta_file, pipeline=[]) # test evaluation using group_of, adding father classes to # GT and annotations, and not considering image-level labels, # In the first label (Sports equipment): tp = [0, 1, 0, 0, 1], # fp = [1, 0, 1, 1, 0] # In the second label (Ball), tp = [0, 1, 0, 1], fp = [1, 0, 1, 0]. # In the third label (Football), tp = [0, 1, 0, 1], fp = [1, 0, 1, 0]. # In the fourth label (Bicycle), tp = [], fp = []. result = _create_dummy_results() parsed_results = dataset.evaluate(result) assert np.isclose(parsed_results['mAP'], 0.8333, 1e-4) tmp_dir.cleanup() def test_openimages_challenge_dataset(): # create fake ann files tmp_dir = tempfile.TemporaryDirectory() ann_file = osp.join(tmp_dir.name, 'ann_file.txt') label_file = osp.join(tmp_dir.name, 'label_file.csv') label_level_file = osp.join(tmp_dir.name, 'label_level_file.csv') _creat_oid_challenge_style_ann(ann_file, label_file, label_level_file) dataset = OpenImagesChallengeDataset( ann_file=ann_file, label_file=label_file, load_image_level_labels=False, get_supercategory=False, pipeline=[]) ann = dataset.get_ann_info(0) # two legal detection bboxes with `group_of` parameter assert ann['bboxes'].shape[0] == ann['labels'].shape[0] == \ ann['gt_is_group_ofs'].shape[0] == 2 dataset.prepare_train_img(0) dataset.prepare_test_img(0) meta_file = osp.join(tmp_dir.name, 'meta.pkl') _create_metas(meta_file) result = _create_dummy_results() with pytest.raises(AssertionError): fake_json = osp.join(tmp_dir.name, 'hierarchy.json') OpenImagesChallengeDataset( ann_file=ann_file, label_file=label_file, image_level_ann_file=label_level_file, hierarchy_file=fake_json, meta_file=meta_file, pipeline=[]) hierarchy_file = osp.join(tmp_dir.name, 'hierarchy.np') _create_hierarchy_np(hierarchy_file) dataset = OpenImagesChallengeDataset( ann_file=ann_file, label_file=label_file, image_level_ann_file=label_level_file, hierarchy_file=hierarchy_file, meta_file=meta_file, pipeline=[]) dataset.evaluate(result) tmp_dir.cleanup() ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_datasets/test_panoptic_dataset.py ================================================ # Copyright (c) OpenMMLab. All rights reserved.
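# A minimal sketch (editorial, hypothetical helper name) of the PQ metric
# decomposition this file's assertions rely on: PQ = SQ * RQ, where SQ is
# the mean IoU over matched (TP) segments and
# RQ = TP / (TP + 0.5 * FP + 0.5 * FN). The numbers mirror the 'person'
# case commented in test_panoptic_evaluation below.
def _sketch_pq_decomposition():
    tp_ious = [351 / 449]  # the one matched 'person' segment, IoU ~= 0.78
    tp, fp, fn = 1, 1, 1
    sq = sum(tp_ious) / len(tp_ious)
    rq = tp / (tp + 0.5 * fp + 0.5 * fn)
    assert abs(rq - 0.5) < 1e-9
    assert abs(sq * rq - 0.5 * 351 / 449) < 1e-9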
import os.path as osp import tempfile import mmcv import numpy as np from mmdet.core import encode_mask_results from mmdet.datasets.api_wrappers import pq_compute_single_core from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET, CocoPanopticDataset try: from panopticapi.utils import id2rgb except ImportError: id2rgb = None def _create_panoptic_style_json(json_name): image1 = { 'id': 0, 'width': 640, 'height': 640, 'file_name': 'fake_name1.jpg', } image2 = { 'id': 1, 'width': 640, 'height': 800, 'file_name': 'fake_name2.jpg', } images = [image1, image2] annotations = [ { 'segments_info': [{ 'id': 1, 'category_id': 0, 'area': 400, 'bbox': [50, 60, 20, 20], 'iscrowd': 0 }, { 'id': 2, 'category_id': 1, 'area': 900, 'bbox': [100, 120, 30, 30], 'iscrowd': 0 }, { 'id': 3, 'category_id': 2, 'iscrowd': 0, 'bbox': [1, 189, 612, 285], 'area': 70036 }], 'file_name': 'fake_name1.jpg', 'image_id': 0 }, { 'segments_info': [ { # Different to instance style json, there # are duplicate ids in panoptic style json 'id': 1, 'category_id': 0, 'area': 400, 'bbox': [50, 60, 20, 20], 'iscrowd': 0 }, { 'id': 4, 'category_id': 1, 'area': 900, 'bbox': [100, 120, 30, 30], 'iscrowd': 1 }, { 'id': 5, 'category_id': 2, 'iscrowd': 0, 'bbox': [100, 200, 200, 300], 'area': 66666 }, { 'id': 6, 'category_id': 0, 'iscrowd': 0, 'bbox': [1, 189, -10, 285], 'area': 70036 } ], 'file_name': 'fake_name2.jpg', 'image_id': 1 } ] categories = [{ 'id': 0, 'name': 'car', 'supercategory': 'car', 'isthing': 1 }, { 'id': 1, 'name': 'person', 'supercategory': 'person', 'isthing': 1 }, { 'id': 2, 'name': 'wall', 'supercategory': 'wall', 'isthing': 0 }] fake_json = { 'images': images, 'annotations': annotations, 'categories': categories } mmcv.dump(fake_json, json_name) return fake_json def test_load_panoptic_style_json(): tmp_dir = tempfile.TemporaryDirectory() fake_json_file = osp.join(tmp_dir.name, 'fake_data.json') fake_json = _create_panoptic_style_json(fake_json_file) dataset = CocoPanopticDataset( ann_file=fake_json_file, classes=[cat['name'] for cat in fake_json['categories']], pipeline=[]) ann = dataset.get_ann_info(0) # two legal instances assert ann['bboxes'].shape[0] == ann['labels'].shape[0] == 2 # three masks for both foreground and background assert len(ann['masks']) == 3 ann = dataset.get_ann_info(1) # one legal instance, one illegal instance, # one crowd instance and one background mask assert ann['bboxes'].shape[0] == ann['labels'].shape[0] == 1 assert ann['bboxes_ignore'].shape[0] == 1 assert len(ann['masks']) == 3 def _create_panoptic_gt_annotations(ann_file): categories = [{ 'id': 0, 'name': 'person', 'supercategory': 'person', 'isthing': 1 }, { 'id': 1, 'name': 'dog', 'supercategory': 'dog', 'isthing': 1 }, { 'id': 2, 'name': 'wall', 'supercategory': 'wall', 'isthing': 0 }] images = [{ 'id': 0, 'width': 80, 'height': 60, 'file_name': 'fake_name1.jpg', }] annotations = [{ 'segments_info': [{ 'id': 1, 'category_id': 0, 'area': 400, 'bbox': [10, 10, 10, 40], 'iscrowd': 0 }, { 'id': 2, 'category_id': 0, 'area': 400, 'bbox': [30, 10, 10, 40], 'iscrowd': 0 }, { 'id': 3, 'category_id': 1, 'iscrowd': 0, 'bbox': [50, 10, 10, 5], 'area': 50 }, { 'id': 4, 'category_id': 2, 'iscrowd': 0, 'bbox': [0, 0, 80, 60], 'area': 3950 }], 'file_name': 'fake_name1.png', 'image_id': 0 }] gt_json = { 'images': images, 'annotations': annotations, 'categories': categories } # 4 is the id of the background class annotation. 
gt = np.zeros((60, 80), dtype=np.int64) + 4 gt_bboxes = np.array([[10, 10, 10, 40], [30, 10, 10, 40], [50, 10, 10, 5]], dtype=np.int64) for i in range(3): x, y, w, h = gt_bboxes[i] gt[y:y + h, x:x + w] = i + 1 # id starts from 1 gt = id2rgb(gt).astype(np.uint8) img_path = osp.join(osp.dirname(ann_file), 'fake_name1.png') mmcv.imwrite(gt[:, :, ::-1], img_path) mmcv.dump(gt_json, ann_file) return gt_json def test_panoptic_evaluation(): if id2rgb is None: return # TP for background class, IoU=3576/4324=0.827 # 2 is the category id of the background class pred = np.zeros((60, 80), dtype=np.int64) + 2 pred_bboxes = np.array( [ [11, 11, 10, 40], # TP IoU=351/449=0.78 [38, 10, 10, 40], # FP [51, 10, 10, 5] ], # TP IoU=45/55=0.818 dtype=np.int64) pred_labels = np.array([0, 0, 1], dtype=np.int64) for i in range(3): x, y, w, h = pred_bboxes[i] pred[y:y + h, x:x + w] = (i + 1) * INSTANCE_OFFSET + pred_labels[i] tmp_dir = tempfile.TemporaryDirectory() ann_file = osp.join(tmp_dir.name, 'panoptic.json') gt_json = _create_panoptic_gt_annotations(ann_file) results = [{'pan_results': pred}] dataset = CocoPanopticDataset( ann_file=ann_file, seg_prefix=tmp_dir.name, classes=[cat['name'] for cat in gt_json['categories']], pipeline=[]) # For 'person', sq = 0.78 / 1, rq = 1 / 2 (1 tp + 0.5 * (1 fn + 1 fp)) # For 'dog', sq = 0.818, rq = 1 / 1 # For 'wall', sq = 0.827, rq = 1 / 1 # Here are the results for all classes: # +--------+--------+--------+---------+------------+ # | | PQ | SQ | RQ | categories | # +--------+--------+--------+---------+------------+ # | All | 67.869 | 80.898 | 83.333 | 3 | # | Things | 60.453 | 79.996 | 75.000 | 2 | # | Stuff | 82.701 | 82.701 | 100.000 | 1 | # +--------+--------+--------+---------+------------+ parsed_results = dataset.evaluate(results) assert np.isclose(parsed_results['PQ'], 67.869) assert np.isclose(parsed_results['SQ'], 80.898) assert np.isclose(parsed_results['RQ'], 83.333) assert np.isclose(parsed_results['PQ_th'], 60.453) assert np.isclose(parsed_results['SQ_th'], 79.996) assert np.isclose(parsed_results['RQ_th'], 75.000) assert np.isclose(parsed_results['PQ_st'], 82.701) assert np.isclose(parsed_results['SQ_st'], 82.701) assert np.isclose(parsed_results['RQ_st'], 100.000) # test jsonfile_prefix outfile_prefix = osp.join(tmp_dir.name, 'results') parsed_results = dataset.evaluate(results, jsonfile_prefix=outfile_prefix) assert np.isclose(parsed_results['PQ'], 67.869) assert np.isclose(parsed_results['SQ'], 80.898) assert np.isclose(parsed_results['RQ'], 83.333) assert np.isclose(parsed_results['PQ_th'], 60.453) assert np.isclose(parsed_results['SQ_th'], 79.996) assert np.isclose(parsed_results['RQ_th'], 75.000) assert np.isclose(parsed_results['PQ_st'], 82.701) assert np.isclose(parsed_results['SQ_st'], 82.701) assert np.isclose(parsed_results['RQ_st'], 100.000) # test classwise parsed_results = dataset.evaluate(results, classwise=True) assert np.isclose(parsed_results['PQ'], 67.869) assert np.isclose(parsed_results['SQ'], 80.898) assert np.isclose(parsed_results['RQ'], 83.333) assert np.isclose(parsed_results['PQ_th'], 60.453) assert np.isclose(parsed_results['SQ_th'], 79.996) assert np.isclose(parsed_results['RQ_th'], 75.000) assert np.isclose(parsed_results['PQ_st'], 82.701) assert np.isclose(parsed_results['SQ_st'], 82.701) assert np.isclose(parsed_results['RQ_st'], 100.000) # test the api wrapper of `pq_compute_single_core` # Code is copied from `coco_panoptic.py` and modified result_files, _ = dataset.format_results( results, jsonfile_prefix=outfile_prefix)
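    # Added cross-check (illustrative note, not from the upstream test):
    # PQ decomposes per class as PQ = SQ * RQ, then averages over classes.
    # Using the per-class numbers quoted in the comments above:
    #   person: 0.780 * 0.5, dog: 0.818 * 1.0, wall: 0.827 * 1.0
    _per_class_pq = [0.78 * 0.5, 0.818 * 1.0, 0.827 * 1.0]
    assert np.isclose(sum(_per_class_pq) / 3 * 100, 67.869, atol=0.5)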
imgs = dataset.coco.imgs gt_json = dataset.coco.img_ann_map # image to annotations gt_json = [{ 'image_id': k, 'segments_info': v, 'file_name': imgs[k]['segm_file'] } for k, v in gt_json.items()] pred_json = mmcv.load(result_files['panoptic']) pred_json = dict((el['image_id'], el) for el in pred_json['annotations']) # match the gt_anns and pred_anns in the same image matched_annotations_list = [] for gt_ann in gt_json: img_id = gt_ann['image_id'] matched_annotations_list.append((gt_ann, pred_json[img_id])) gt_folder = dataset.seg_prefix pred_folder = osp.join(osp.dirname(outfile_prefix), 'panoptic') pq_stat = pq_compute_single_core(0, matched_annotations_list, gt_folder, pred_folder, dataset.categories) pq_all = pq_stat.pq_average(dataset.categories, isthing=None)[0] assert np.isclose(pq_all['pq'] * 100, 67.869) assert np.isclose(pq_all['sq'] * 100, 80.898) assert np.isclose(pq_all['rq'] * 100, 83.333) assert pq_all['n'] == 3 def _create_instance_segmentation_gt_annotations(ann_file): categories = [{ 'id': 0, 'name': 'person', 'supercategory': 'person', 'isthing': 1 }, { 'id': 1, 'name': 'dog', 'supercategory': 'dog', 'isthing': 1 }, { 'id': 2, 'name': 'wall', 'supercategory': 'wall', 'isthing': 0 }] images = [{ 'id': 0, 'width': 80, 'height': 60, 'file_name': 'fake_name1.jpg', }] person1_polygon = [10, 10, 20, 10, 20, 50, 10, 50, 10, 10] person2_polygon = [30, 10, 40, 10, 40, 50, 30, 50, 30, 10] dog_polygon = [50, 10, 60, 10, 60, 15, 50, 15, 50, 10] annotations = [ { 'id': 0, 'image_id': 0, 'category_id': 0, 'segmentation': [person1_polygon], 'area': 400, 'bbox': [10, 10, 10, 40], 'iscrowd': 0 }, { 'id': 1, 'image_id': 0, 'category_id': 0, 'segmentation': [person2_polygon], 'area': 400, 'bbox': [30, 10, 10, 40], 'iscrowd': 0 }, { 'id': 2, 'image_id': 0, 'category_id': 1, 'segmentation': [dog_polygon], 'area': 50, 'bbox': [50, 10, 10, 5], 'iscrowd': 0 }, ] gt_json = { 'images': images, 'annotations': annotations, 'categories': categories } mmcv.dump(gt_json, ann_file) def test_instance_segmentation_evaluation(): pred_bbox = [ np.array([[11, 10, 20, 50, 0.8], [31, 10, 40, 50, 0.8]]), np.array([[51, 10, 60, 15, 0.7]]) ] person1_mask = np.zeros((60, 80), dtype=bool) person1_mask[20:50, 11:20] = True person2_mask = np.zeros((60, 80), dtype=bool) person2_mask[20:50, 31:40] = True dog_mask = np.zeros((60, 80), dtype=bool) dog_mask[10:15, 51:60] = True pred_mask = [[person1_mask, person2_mask], [ dog_mask, ]] results = [{'ins_results': (pred_bbox, encode_mask_results(pred_mask))}] tmp_dir = tempfile.TemporaryDirectory() pan_ann_file = osp.join(tmp_dir.name, 'panoptic.json') ins_ann_file = osp.join(tmp_dir.name, 'instance.json') _create_panoptic_gt_annotations(pan_ann_file) _create_instance_segmentation_gt_annotations(ins_ann_file) dataset = CocoPanopticDataset( ann_file=pan_ann_file, ins_ann_file=ins_ann_file, seg_prefix=tmp_dir.name, pipeline=[]) dataset.THING_CLASSES = ['person', 'dog'] dataset.STUFF_CLASSES = ['wall'] dataset.CLASSES = dataset.THING_CLASSES + dataset.STUFF_CLASSES parsed_results = dataset.evaluate(results, metric=['segm', 'bbox']) # Here is the results for instance segmentation: # { # 'segm_mAP': 0.5, 'segm_mAP_50': 0.626, 'segm_mAP_75': 0.5, # 'segm_mAP_s': 0.5, 'segm_mAP_m': -1.0, 'segm_mAP_l': -1.0, # 'segm_mAP_copypaste': '0.500 0.626 0.500 0.500 -1.000 -1.000', # 'bbox_mAP': 0.564, 'bbox_mAP_50': 0.626, 'bbox_mAP_75': 0.626, # 'bbox_mAP_s': 0.564, 'bbox_mAP_m': -1.0, 'bbox_mAP_l': -1.0, # 'bbox_mAP_copypaste': '0.564 0.626 0.626 0.564 -1.000 -1.000' # } assert 
np.isclose(parsed_results['segm_mAP'], 0.5) assert np.isclose(parsed_results['bbox_mAP'], 0.564) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_datasets/test_xml_dataset.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import pytest from mmdet.datasets import DATASETS def test_xml_dataset(): dataconfig = { 'ann_file': 'data/VOCdevkit/VOC2007/ImageSets/Main/test.txt', 'img_prefix': 'data/VOCdevkit/VOC2007/', 'pipeline': [{ 'type': 'LoadImageFromFile' }] } XMLDataset = DATASETS.get('XMLDataset') class XMLDatasetSubClass(XMLDataset): CLASSES = None # get_ann_info and _filter_imgs of XMLDataset # use self.CLASSES, so constructing a subclass whose # CLASSES is None must raise an AssertionError with pytest.raises(AssertionError): XMLDatasetSubClass(**dataconfig) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_pipelines/test_formatting.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import os.path as osp from mmcv.utils import build_from_cfg from mmdet.datasets.builder import PIPELINES def test_default_format_bundle(): results = dict( img_prefix=osp.join(osp.dirname(__file__), '../../data'), img_info=dict(filename='color.jpg')) load = dict(type='LoadImageFromFile') load = build_from_cfg(load, PIPELINES) bundle = dict(type='DefaultFormatBundle') bundle = build_from_cfg(bundle, PIPELINES) results = load(results) assert 'pad_shape' not in results assert 'scale_factor' not in results assert 'img_norm_cfg' not in results results = bundle(results) assert 'pad_shape' in results assert 'scale_factor' in results assert 'img_norm_cfg' in results ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_pipelines/test_loading.py ================================================ # Copyright (c) OpenMMLab. All rights reserved.
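# Added note (illustrative, not part of the upstream file):
# LoadImageFromFile consumes a dict carrying `img_prefix` and
# `img_info['filename']` and fills in the image plus the bookkeeping keys
# asserted below. Roughly, assuming the default disk backend:
#
#     results['filename'] = osp.join(img_prefix, filename)  # if prefix set
#     results['img'] = mmcv.imread(results['filename'])     # BGR, uint8
#     results['img_shape'] = results['ori_shape'] = results['img'].shape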
import copy import os.path as osp import mmcv import numpy as np import pytest from mmdet.core.mask import BitmapMasks, PolygonMasks from mmdet.datasets.pipelines import (FilterAnnotations, LoadImageFromFile, LoadImageFromWebcam, LoadMultiChannelImageFromFiles) class TestLoading: @classmethod def setup_class(cls): cls.data_prefix = osp.join(osp.dirname(__file__), '../../data') def test_load_img(self): results = dict( img_prefix=self.data_prefix, img_info=dict(filename='color.jpg')) transform = LoadImageFromFile() results = transform(copy.deepcopy(results)) assert results['filename'] == osp.join(self.data_prefix, 'color.jpg') assert results['ori_filename'] == 'color.jpg' assert results['img'].shape == (288, 512, 3) assert results['img'].dtype == np.uint8 assert results['img_shape'] == (288, 512, 3) assert results['ori_shape'] == (288, 512, 3) assert repr(transform) == transform.__class__.__name__ + \ "(to_float32=False, color_type='color', channel_order='bgr', " + \ "file_client_args={'backend': 'disk'})" # no img_prefix results = dict( img_prefix=None, img_info=dict(filename='tests/data/color.jpg')) transform = LoadImageFromFile() results = transform(copy.deepcopy(results)) assert results['filename'] == 'tests/data/color.jpg' assert results['ori_filename'] == 'tests/data/color.jpg' assert results['img'].shape == (288, 512, 3) # to_float32 transform = LoadImageFromFile(to_float32=True) results = transform(copy.deepcopy(results)) assert results['img'].dtype == np.float32 # gray image results = dict( img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg')) transform = LoadImageFromFile() results = transform(copy.deepcopy(results)) assert results['img'].shape == (288, 512, 3) assert results['img'].dtype == np.uint8 transform = LoadImageFromFile(color_type='unchanged') results = transform(copy.deepcopy(results)) assert results['img'].shape == (288, 512) assert results['img'].dtype == np.uint8 def test_load_multi_channel_img(self): results = dict( img_prefix=self.data_prefix, img_info=dict(filename=['color.jpg', 'color.jpg'])) transform = LoadMultiChannelImageFromFiles() results = transform(copy.deepcopy(results)) assert results['filename'] == [ osp.join(self.data_prefix, 'color.jpg'), osp.join(self.data_prefix, 'color.jpg') ] assert results['ori_filename'] == ['color.jpg', 'color.jpg'] assert results['img'].shape == (288, 512, 3, 2) assert results['img'].dtype == np.uint8 assert results['img_shape'] == (288, 512, 3, 2) assert results['ori_shape'] == (288, 512, 3, 2) assert results['pad_shape'] == (288, 512, 3, 2) assert results['scale_factor'] == 1.0 assert repr(transform) == transform.__class__.__name__ + \ "(to_float32=False, color_type='unchanged', " + \ "file_client_args={'backend': 'disk'})" def test_load_webcam_img(self): img = mmcv.imread(osp.join(self.data_prefix, 'color.jpg')) results = dict(img=img) transform = LoadImageFromWebcam() results = transform(copy.deepcopy(results)) assert results['filename'] is None assert results['ori_filename'] is None assert results['img'].shape == (288, 512, 3) assert results['img'].dtype == np.uint8 assert results['img_shape'] == (288, 512, 3) assert results['ori_shape'] == (288, 512, 3) def _build_filter_annotations_args(): kwargs = (dict(min_gt_bbox_wh=(100, 100)), dict(min_gt_bbox_wh=(100, 100), keep_empty=False), dict(min_gt_bbox_wh=(1, 1)), dict(min_gt_bbox_wh=(.01, .01)), dict(min_gt_bbox_wh=(.01, .01), by_mask=True), dict(by_mask=True), dict(by_box=False, by_mask=True)) targets = (None, 0, 1, 2, 1, 1, 1) return list(zip(targets, kwargs)) 
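# Added note (illustrative, not part of the upstream file): the `targets`
# above follow from the two toy boxes used in `test_filter_annotations`,
# whose (w, h) are (2, 4) and (0.1, 0.1). A minimal, hypothetical sketch of
# the by-box rule, assuming FilterAnnotations keeps a box iff w > min_w
# and h > min_h:
def _expected_num_kept(bboxes, min_wh):
    """Count boxes whose width and height both exceed ``min_wh``."""
    w = bboxes[:, 2] - bboxes[:, 0]
    h = bboxes[:, 3] - bboxes[:, 1]
    return int(((w > min_wh[0]) & (h > min_wh[1])).sum())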
@pytest.mark.parametrize('target, kwargs', _build_filter_annotations_args()) def test_filter_annotations(target, kwargs): filter_ann = FilterAnnotations(**kwargs) bboxes = np.array([[2., 10., 4., 14.], [2., 10., 2.1, 10.1]]) raw_masks = np.zeros((2, 24, 24)) raw_masks[0, 10:14, 2:4] = 1 bitmap_masks = BitmapMasks(raw_masks, 24, 24) results = dict(gt_bboxes=bboxes, gt_masks=bitmap_masks) results = filter_ann(results) if results is not None: results = results['gt_bboxes'].shape[0] assert results == target polygons = [[np.array([2.0, 10.0, 4.0, 10.0, 4.0, 14.0, 2.0, 14.0])], [np.array([2.0, 10.0, 2.1, 10.0, 2.1, 10.1, 2.0, 10.1])]] polygon_masks = PolygonMasks(polygons, 24, 24) results = dict(gt_bboxes=bboxes, gt_masks=polygon_masks) results = filter_ann(results) if results is not None: results = len(results.get('gt_masks').masks) assert results == target ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_pipelines/test_sampler.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.core.bbox.assigners import MaxIoUAssigner from mmdet.core.bbox.samplers import (OHEMSampler, RandomSampler, ScoreHLRSampler) def test_random_sampler(): assigner = MaxIoUAssigner( pos_iou_thr=0.5, neg_iou_thr=0.5, ignore_iof_thr=0.5, ignore_wrt_candidates=False, ) bboxes = torch.FloatTensor([ [0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42], ]) gt_bboxes = torch.FloatTensor([ [0, 0, 10, 9], [0, 10, 10, 19], ]) gt_labels = torch.LongTensor([1, 2]) gt_bboxes_ignore = torch.Tensor([ [30, 30, 40, 40], ]) assign_result = assigner.assign( bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, gt_labels=gt_labels) sampler = RandomSampler( num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True) sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels) assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds) assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds) def test_random_sampler_empty_gt(): assigner = MaxIoUAssigner( pos_iou_thr=0.5, neg_iou_thr=0.5, ignore_iof_thr=0.5, ignore_wrt_candidates=False, ) bboxes = torch.FloatTensor([ [0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42], ]) gt_bboxes = torch.empty(0, 4) gt_labels = torch.empty(0, ).long() assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels) sampler = RandomSampler( num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True) sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels) assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds) assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds) def test_random_sampler_empty_pred(): assigner = MaxIoUAssigner( pos_iou_thr=0.5, neg_iou_thr=0.5, ignore_iof_thr=0.5, ignore_wrt_candidates=False, ) bboxes = torch.empty(0, 4) gt_bboxes = torch.FloatTensor([ [0, 0, 10, 9], [0, 10, 10, 19], ]) gt_labels = torch.LongTensor([1, 2]) assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels) sampler = RandomSampler( num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True) sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels) assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds) assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds) def _context_for_ohem(): import sys from os.path import dirname sys.path.insert(0, dirname(dirname(dirname(__file__)))) from test_models.test_forward import 
_get_detector_cfg model = _get_detector_cfg( 'faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py') model['pretrained'] = None from mmdet.models import build_detector context = build_detector(model).roi_head return context def test_ohem_sampler(): assigner = MaxIoUAssigner( pos_iou_thr=0.5, neg_iou_thr=0.5, ignore_iof_thr=0.5, ignore_wrt_candidates=False, ) bboxes = torch.FloatTensor([ [0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42], ]) gt_bboxes = torch.FloatTensor([ [0, 0, 10, 9], [0, 10, 10, 19], ]) gt_labels = torch.LongTensor([1, 2]) gt_bboxes_ignore = torch.Tensor([ [30, 30, 40, 40], ]) assign_result = assigner.assign( bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, gt_labels=gt_labels) context = _context_for_ohem() sampler = OHEMSampler( num=10, pos_fraction=0.5, context=context, neg_pos_ub=-1, add_gt_as_proposals=True) feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]] sample_result = sampler.sample( assign_result, bboxes, gt_bboxes, gt_labels, feats=feats) assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds) assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds) def test_ohem_sampler_empty_gt(): assigner = MaxIoUAssigner( pos_iou_thr=0.5, neg_iou_thr=0.5, ignore_iof_thr=0.5, ignore_wrt_candidates=False, ) bboxes = torch.FloatTensor([ [0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42], ]) gt_bboxes = torch.empty(0, 4) gt_labels = torch.LongTensor([]) gt_bboxes_ignore = torch.Tensor([]) assign_result = assigner.assign( bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, gt_labels=gt_labels) context = _context_for_ohem() sampler = OHEMSampler( num=10, pos_fraction=0.5, context=context, neg_pos_ub=-1, add_gt_as_proposals=True) feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]] sample_result = sampler.sample( assign_result, bboxes, gt_bboxes, gt_labels, feats=feats) assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds) assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds) def test_ohem_sampler_empty_pred(): assigner = MaxIoUAssigner( pos_iou_thr=0.5, neg_iou_thr=0.5, ignore_iof_thr=0.5, ignore_wrt_candidates=False, ) bboxes = torch.empty(0, 4) gt_bboxes = torch.FloatTensor([ [0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42], ]) gt_labels = torch.LongTensor([1, 2, 2, 3]) gt_bboxes_ignore = torch.Tensor([]) assign_result = assigner.assign( bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, gt_labels=gt_labels) context = _context_for_ohem() sampler = OHEMSampler( num=10, pos_fraction=0.5, context=context, neg_pos_ub=-1, add_gt_as_proposals=True) feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]] sample_result = sampler.sample( assign_result, bboxes, gt_bboxes, gt_labels, feats=feats) assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds) assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds) def test_random_sample_result(): from mmdet.core.bbox.samplers.sampling_result import SamplingResult SamplingResult.random(num_gts=0, num_preds=0) SamplingResult.random(num_gts=0, num_preds=3) SamplingResult.random(num_gts=3, num_preds=3) SamplingResult.random(num_gts=0, num_preds=3) SamplingResult.random(num_gts=7, num_preds=7) SamplingResult.random(num_gts=7, num_preds=64) SamplingResult.random(num_gts=24, num_preds=3) for i in range(3): SamplingResult.random(rng=i) def test_score_hlr_sampler_empty_pred(): assigner = MaxIoUAssigner( pos_iou_thr=0.5, neg_iou_thr=0.5, ignore_iof_thr=0.5, 
ignore_wrt_candidates=False, ) context = _context_for_ohem() sampler = ScoreHLRSampler( num=10, pos_fraction=0.5, context=context, neg_pos_ub=-1, add_gt_as_proposals=True) gt_bboxes_ignore = torch.Tensor([]) feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]] # empty bbox bboxes = torch.empty(0, 4) gt_bboxes = torch.FloatTensor([ [0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42], ]) gt_labels = torch.LongTensor([1, 2, 2, 3]) assign_result = assigner.assign( bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, gt_labels=gt_labels) sample_result, _ = sampler.sample( assign_result, bboxes, gt_bboxes, gt_labels, feats=feats) assert len(sample_result.neg_inds) == 0 assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds) assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds) # empty gt bboxes = torch.FloatTensor([ [0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42], ]) gt_bboxes = torch.empty(0, 4) gt_labels = torch.LongTensor([]) assign_result = assigner.assign( bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, gt_labels=gt_labels) sample_result, _ = sampler.sample( assign_result, bboxes, gt_bboxes, gt_labels, feats=feats) assert len(sample_result.pos_inds) == 0 assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds) assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds) # non-empty input bboxes = torch.FloatTensor([ [0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42], ]) gt_bboxes = torch.FloatTensor([ [0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42], ]) gt_labels = torch.LongTensor([1, 2, 2, 3]) assign_result = assigner.assign( bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, gt_labels=gt_labels) sample_result, _ = sampler.sample( assign_result, bboxes, gt_bboxes, gt_labels, feats=feats) assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds) assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_pipelines/test_transform/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .utils import check_result_same, construct_toy_data, create_random_bboxes __all__ = ['create_random_bboxes', 'construct_toy_data', 'check_result_same'] ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_pipelines/test_transform/test_img_augment.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
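# Added note (illustrative, not part of the upstream file): the color and
# brightness tests below all check the same blending identity,
#
#     out = clip(factor * img + (1 - factor) * degenerate, 0, 255)
#
# where `degenerate` is a gray-scale copy of the image for ColorTransform
# (so factor 0 yields the gray image, factor 1 the original, and factor
# 0.5 their average) and an all-zero image for BrightnessTransform.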
import copy import mmcv import numpy as np from mmcv.utils import build_from_cfg from numpy.testing import assert_array_equal from mmdet.datasets.builder import PIPELINES from .utils import construct_toy_data def test_adjust_color(): results = construct_toy_data() # test without aug transform = dict(type='ColorTransform', prob=0, level=10) transform_module = build_from_cfg(transform, PIPELINES) results_transformed = transform_module(copy.deepcopy(results)) assert_array_equal(results_transformed['img'], results['img']) # test with factor 1 img = results['img'] transform = dict(type='ColorTransform', prob=1, level=10) transform_module = build_from_cfg(transform, PIPELINES) results_transformed = transform_module(copy.deepcopy(results)) assert_array_equal(results_transformed['img'], img) # test with factor 0 transform_module.factor = 0 img_gray = mmcv.bgr2gray(img.copy()) img_r = np.stack([img_gray, img_gray, img_gray], axis=-1) results_transformed = transform_module(copy.deepcopy(results)) assert_array_equal(results_transformed['img'], img_r) # test with factor 0.5 transform_module.factor = 0.5 results_transformed = transform_module(copy.deepcopy(results)) img = results['img'] assert_array_equal( results_transformed['img'], np.round(np.clip((img * 0.5 + img_r * 0.5), 0, 255)).astype(img.dtype)) def test_imequalize(nb_rand_test=100): def _imequalize(img): # equalize the image using PIL.ImageOps.equalize from PIL import Image, ImageOps img = Image.fromarray(img) equalized_img = np.asarray(ImageOps.equalize(img)) return equalized_img results = construct_toy_data() # test without aug transform = dict(type='EqualizeTransform', prob=0) transform_module = build_from_cfg(transform, PIPELINES) results_transformed = transform_module(copy.deepcopy(results)) assert_array_equal(results_transformed['img'], results['img']) # test equalize with the case step=0 transform = dict(type='EqualizeTransform', prob=1.) transform_module = build_from_cfg(transform, PIPELINES) img = np.array([[0, 0, 0], [120, 120, 120], [255, 255, 255]], dtype=np.uint8) img = np.stack([img, img, img], axis=-1) results['img'] = img results_transformed = transform_module(copy.deepcopy(results)) assert_array_equal(results_transformed['img'], img) # test equalize with randomly sampled image. for _ in range(nb_rand_test): img = np.clip(np.random.uniform(0, 1, (1000, 1200, 3)) * 260, 0, 255).astype(np.uint8) results['img'] = img results_transformed = transform_module(copy.deepcopy(results)) assert_array_equal(results_transformed['img'], _imequalize(img)) def test_adjust_brightness(nb_rand_test=100): def _adjust_brightness(img, factor): # adjust the brightness of image using # PIL.ImageEnhance.Brightness from PIL import Image from PIL.ImageEnhance import Brightness img = Image.fromarray(img) brightened_img = Brightness(img).enhance(factor) return np.asarray(brightened_img) results = construct_toy_data() # test without aug transform = dict(type='BrightnessTransform', level=10, prob=0) transform_module = build_from_cfg(transform, PIPELINES) results_transformed = transform_module(copy.deepcopy(results)) assert_array_equal(results_transformed['img'], results['img']) # test case with factor 1.0 transform = dict(type='BrightnessTransform', level=10, prob=1.)
transform_module = build_from_cfg(transform, PIPELINES) transform_module.factor = 1.0 results_transformed = transform_module(copy.deepcopy(results)) assert_array_equal(results_transformed['img'], results['img']) # test case with factor 0.0 transform_module.factor = 0.0 results_transformed = transform_module(copy.deepcopy(results)) assert_array_equal(results_transformed['img'], np.zeros_like(results['img'])) # test with randomly sampled images and factors. for _ in range(nb_rand_test): img = np.clip(np.random.uniform(0, 1, (1000, 1200, 3)) * 260, 0, 255).astype(np.uint8) factor = np.random.uniform() transform_module.factor = factor results['img'] = img np.testing.assert_allclose( transform_module(copy.deepcopy(results))['img'].astype(np.int32), _adjust_brightness(img, factor).astype(np.int32), rtol=0, atol=1) def test_adjust_contrast(nb_rand_test=100): def _adjust_contrast(img, factor): from PIL import Image from PIL.ImageEnhance import Contrast # Image.fromarray expects RGB, not BGR, by default. # convert from BGR to RGB img = Image.fromarray(img[..., ::-1], mode='RGB') contrasted_img = Contrast(img).enhance(factor) # convert from RGB to BGR return np.asarray(contrasted_img)[..., ::-1] results = construct_toy_data() # test without aug transform = dict(type='ContrastTransform', level=10, prob=0) transform_module = build_from_cfg(transform, PIPELINES) results_transformed = transform_module(copy.deepcopy(results)) assert_array_equal(results_transformed['img'], results['img']) # test case with factor 1.0 transform = dict(type='ContrastTransform', level=10, prob=1.) transform_module = build_from_cfg(transform, PIPELINES) transform_module.factor = 1.0 results_transformed = transform_module(copy.deepcopy(results)) assert_array_equal(results_transformed['img'], results['img']) # test case with factor 0.0 transform_module.factor = 0.0 results_transformed = transform_module(copy.deepcopy(results)) np.testing.assert_allclose( results_transformed['img'], _adjust_contrast(results['img'], 0.), rtol=0, atol=1) # test adjust_contrast with randomly sampled images and factors. for _ in range(nb_rand_test): img = np.clip(np.random.uniform(0, 1, (1200, 1000, 3)) * 260, 0, 255).astype(np.uint8) factor = np.random.uniform() transform_module.factor = factor results['img'] = img results_transformed = transform_module(copy.deepcopy(results)) # Note the gap (at most 1) between PIL.ImageEnhance.Contrast # and mmcv.adjust_contrast comes from the different # color-to-gray conversions used by mmcv and PIL. np.testing.assert_allclose( transform_module(copy.deepcopy(results))['img'].astype(np.int32), _adjust_contrast(results['img'], factor).astype(np.int32), rtol=0, atol=1) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_pipelines/test_transform/test_models_aug_test.py ================================================ # Copyright (c) OpenMMLab. All rights reserved.
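# Added note (illustrative, not part of the upstream file): the `12` that
# the aug tests below assert is just the size of the scale x flip cross
# product enumerated by MultiScaleFlipAug:
#
#     3 img_scales * (1 original + 3 flip_directions) = 12 views per image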
import os.path as osp import mmcv import torch from mmcv.parallel import collate from mmcv.utils import build_from_cfg from mmdet.datasets.builder import PIPELINES from mmdet.models import build_detector def model_aug_test_template(cfg_file): # get config cfg = mmcv.Config.fromfile(cfg_file) # init model cfg.model.pretrained = None cfg.model.train_cfg = None model = build_detector(cfg.model) # init test pipeline and set aug test load_cfg, multi_scale_cfg = cfg.test_pipeline multi_scale_cfg['flip'] = True multi_scale_cfg['flip_direction'] = ['horizontal', 'vertical', 'diagonal'] multi_scale_cfg['img_scale'] = [(1333, 800), (800, 600), (640, 480)] load = build_from_cfg(load_cfg, PIPELINES) transform = build_from_cfg(multi_scale_cfg, PIPELINES) results = dict( img_prefix=osp.join(osp.dirname(__file__), '../../../data'), img_info=dict(filename='color.jpg')) results = transform(load(results)) assert len(results['img']) == 12 assert len(results['img_metas']) == 12 results['img'] = [collate([x]) for x in results['img']] results['img_metas'] = [collate([x]).data[0] for x in results['img_metas']] # aug test the model model.eval() with torch.no_grad(): aug_result = model(return_loss=False, rescale=True, **results) return aug_result def test_aug_test_size(): results = dict( img_prefix=osp.join(osp.dirname(__file__), '../../../data'), img_info=dict(filename='color.jpg')) # Define simple pipeline load = dict(type='LoadImageFromFile') load = build_from_cfg(load, PIPELINES) # get config transform = dict( type='MultiScaleFlipAug', transforms=[], img_scale=[(1333, 800), (800, 600), (640, 480)], flip=True, flip_direction=['horizontal', 'vertical', 'diagonal']) multi_aug_test_module = build_from_cfg(transform, PIPELINES) results = load(results) results = multi_aug_test_module(load(results)) # len(["original", "horizontal", "vertical", "diagonal"]) * # len([(1333, 800), (800, 600), (640, 480)]) assert len(results['img']) == 12 def test_cascade_rcnn_aug_test(): aug_result = model_aug_test_template( 'configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py') assert len(aug_result[0]) == 80 def test_mask_rcnn_aug_test(): aug_result = model_aug_test_template( 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py') assert len(aug_result[0]) == 2 assert len(aug_result[0][0]) == 80 assert len(aug_result[0][1]) == 80 def test_htc_aug_test(): aug_result = model_aug_test_template('configs/htc/htc_r50_fpn_1x_coco.py') assert len(aug_result[0]) == 2 assert len(aug_result[0][0]) == 80 assert len(aug_result[0][1]) == 80 def test_scnet_aug_test(): aug_result = model_aug_test_template( 'configs/scnet/scnet_r50_fpn_1x_coco.py') assert len(aug_result[0]) == 2 assert len(aug_result[0][0]) == 80 assert len(aug_result[0][1]) == 80 def test_cornernet_aug_test(): # get config cfg = mmcv.Config.fromfile( 'configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py') # init model cfg.model.pretrained = None cfg.model.train_cfg = None model = build_detector(cfg.model) # init test pipeline and set aug test load_cfg, multi_scale_cfg = cfg.test_pipeline multi_scale_cfg['flip'] = True multi_scale_cfg['flip_direction'] = ['horizontal', 'vertical', 'diagonal'] multi_scale_cfg['scale_factor'] = [0.5, 1.0, 2.0] load = build_from_cfg(load_cfg, PIPELINES) transform = build_from_cfg(multi_scale_cfg, PIPELINES) results = dict( img_prefix=osp.join(osp.dirname(__file__), '../../../data'), img_info=dict(filename='color.jpg')) results = transform(load(results)) assert len(results['img']) == 12 assert len(results['img_metas']) == 12 results['img'] = 
[collate([x]) for x in results['img']] results['img_metas'] = [collate([x]).data[0] for x in results['img_metas']] # aug test the model model.eval() with torch.no_grad(): aug_result = model(return_loss=False, rescale=True, **results) assert len(aug_result[0]) == 80 ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_pipelines/test_transform/test_rotate.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import copy import numpy as np import pytest from mmcv.utils import build_from_cfg from mmdet.core.mask import BitmapMasks, PolygonMasks from mmdet.datasets.builder import PIPELINES from .utils import check_result_same, construct_toy_data def test_rotate(): # test assertion for invalid type of max_rotate_angle with pytest.raises(AssertionError): transform = dict(type='Rotate', level=1, max_rotate_angle=(30, )) build_from_cfg(transform, PIPELINES) # test assertion for invalid type of scale with pytest.raises(AssertionError): transform = dict(type='Rotate', level=2, scale=(1.2, )) build_from_cfg(transform, PIPELINES) # test ValueError for invalid type of img_fill_val with pytest.raises(ValueError): transform = dict( type='Rotate', level=2, img_fill_val=[ 128, ]) build_from_cfg(transform, PIPELINES) # test assertion for invalid number of elements in center with pytest.raises(AssertionError): transform = dict(type='Rotate', level=2, center=(0.5, )) build_from_cfg(transform, PIPELINES) # test assertion for invalid type of center with pytest.raises(AssertionError): transform = dict(type='Rotate', level=2, center=[0, 0]) build_from_cfg(transform, PIPELINES) # test case when no rotate aug (level=0) results = construct_toy_data() img_fill_val = (104, 116, 124) seg_ignore_label = 255 transform = dict( type='Rotate', level=0, prob=1., img_fill_val=img_fill_val, seg_ignore_label=seg_ignore_label, ) rotate_module = build_from_cfg(transform, PIPELINES) results_wo_rotate = rotate_module(copy.deepcopy(results)) check_result_same(results, results_wo_rotate) # test case when no rotate aug (prob<=0) transform = dict( type='Rotate', level=10, prob=0., img_fill_val=img_fill_val, scale=0.6) rotate_module = build_from_cfg(transform, PIPELINES) results_wo_rotate = rotate_module(copy.deepcopy(results)) check_result_same(results, results_wo_rotate) # test clockwise rotation with angle 90 results = construct_toy_data() img_fill_val = 128 transform = dict( type='Rotate', level=10, max_rotate_angle=90, img_fill_val=img_fill_val, # set random_negative_prob to 0 for clockwise rotation random_negative_prob=0., prob=1.) 
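    # Added note (an assumption about mmdet's augmentation convention,
    # spelled out here for readability): the applied angle is derived
    # from `level` as
    #     angle = level / _MAX_LEVEL * max_rotate_angle  # 10 / 10 * 90 = 90
    # and `random_negative_prob` controls how often the sign flips, so
    # prob 0. pins the rotation to the positive (clockwise) direction.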
rotate_module = build_from_cfg(transform, PIPELINES) results_rotated = rotate_module(copy.deepcopy(results)) img_r = np.array([[img_fill_val, 6, 2, img_fill_val], [img_fill_val, 7, 3, img_fill_val]]).astype(np.uint8) img_r = np.stack([img_r, img_r, img_r], axis=-1) results_gt = copy.deepcopy(results) results_gt['img'] = img_r results_gt['gt_bboxes'] = np.array([[1., 0., 2., 1.]], dtype=np.float32) results_gt['gt_bboxes_ignore'] = np.empty((0, 4), dtype=np.float32) gt_masks = np.array([[0, 1, 1, 0], [0, 0, 1, 0]], dtype=np.uint8)[None, :, :] results_gt['gt_masks'] = BitmapMasks(gt_masks, 2, 4) results_gt['gt_semantic_seg'] = np.array( [[255, 6, 2, 255], [255, 7, 3, 255]]).astype(results['gt_semantic_seg'].dtype) check_result_same(results_gt, results_rotated) # test clockwise rotation with angle 90, PolygonMasks results = construct_toy_data(poly2mask=False) results_rotated = rotate_module(copy.deepcopy(results)) gt_masks = [[np.array([2, 0, 2, 1, 1, 1, 1, 0], dtype=np.float)]] results_gt['gt_masks'] = PolygonMasks(gt_masks, 2, 4) check_result_same(results_gt, results_rotated) # test counter-clockwise rotation with angle 90, # and specify the rotation center img_fill_val = (104, 116, 124) transform = dict( type='Rotate', level=10, max_rotate_angle=90, center=(0, 0), img_fill_val=img_fill_val, # set random_negative_prob to 1 for counter-clockwise rotation random_negative_prob=1., prob=1.) results = construct_toy_data() rotate_module = build_from_cfg(transform, PIPELINES) results_rotated = rotate_module(copy.deepcopy(results)) results_gt = copy.deepcopy(results) h, w = results['img'].shape[:2] img_r = np.stack([ np.ones((h, w)) * img_fill_val[0], np.ones((h, w)) * img_fill_val[1], np.ones((h, w)) * img_fill_val[2] ], axis=-1).astype(np.uint8) img_r[0, 0, :] = 1 img_r[0, 1, :] = 5 results_gt['img'] = img_r results_gt['gt_bboxes'] = np.empty((0, 4), dtype=np.float32) results_gt['gt_bboxes_ignore'] = np.empty((0, 4), dtype=np.float32) results_gt['gt_labels'] = np.empty((0, ), dtype=np.int64) gt_masks = np.empty((0, h, w), dtype=np.uint8) results_gt['gt_masks'] = BitmapMasks(gt_masks, h, w) gt_seg = (np.ones((h, w)) * 255).astype(results['gt_semantic_seg'].dtype) gt_seg[0, 0], gt_seg[0, 1] = 1, 5 results_gt['gt_semantic_seg'] = gt_seg check_result_same(results_gt, results_rotated) transform = dict( type='Rotate', level=10, max_rotate_angle=90, center=(0), img_fill_val=img_fill_val, random_negative_prob=1., prob=1.)
rotate_module = build_from_cfg(transform, PIPELINES) results_rotated = rotate_module(copy.deepcopy(results)) check_result_same(results_gt, results_rotated) # test counter-clockwise rotation with angle 90, # and specify the rotation center, PolygonMasks results = construct_toy_data(poly2mask=False) results_rotated = rotate_module(copy.deepcopy(results)) gt_masks = [[np.array([0, 0, 0, 0, 1, 0, 1, 0], dtype=np.float)]] results_gt['gt_masks'] = PolygonMasks(gt_masks, 2, 4) check_result_same(results_gt, results_rotated) # test AutoAugment equipped with Rotate policies = [[dict(type='Rotate', level=10, prob=1.)]] autoaug = dict(type='AutoAugment', policies=policies) autoaug_module = build_from_cfg(autoaug, PIPELINES) autoaug_module(copy.deepcopy(results)) policies = [[ dict(type='Rotate', level=10, prob=1.), dict( type='Rotate', level=8, max_rotate_angle=90, center=(0), img_fill_val=img_fill_val) ]] autoaug = dict(type='AutoAugment', policies=policies) autoaug_module = build_from_cfg(autoaug, PIPELINES) autoaug_module(copy.deepcopy(results)) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_pipelines/test_transform/test_shear.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import copy import numpy as np import pytest from mmcv.utils import build_from_cfg from mmdet.core.mask import BitmapMasks, PolygonMasks from mmdet.datasets.builder import PIPELINES from .utils import check_result_same, construct_toy_data def test_shear(): # test assertion for invalid type of max_shear_magnitude with pytest.raises(AssertionError): transform = dict(type='Shear', level=1, max_shear_magnitude=(0.5, )) build_from_cfg(transform, PIPELINES) # test assertion for invalid value of max_shear_magnitude with pytest.raises(AssertionError): transform = dict(type='Shear', level=2, max_shear_magnitude=1.2) build_from_cfg(transform, PIPELINES) # test ValueError for invalid type of img_fill_val with pytest.raises(ValueError): transform = dict(type='Shear', level=2, img_fill_val=[128]) build_from_cfg(transform, PIPELINES) results = construct_toy_data() # test case when no shear aug (level=0, direction='horizontal') img_fill_val = (104, 116, 124) seg_ignore_label = 255 transform = dict( type='Shear', level=0, prob=1., img_fill_val=img_fill_val, seg_ignore_label=seg_ignore_label, direction='horizontal') shear_module = build_from_cfg(transform, PIPELINES) results_wo_shear = shear_module(copy.deepcopy(results)) check_result_same(results, results_wo_shear) # test case when no shear aug (level=0, direction='vertical') transform = dict( type='Shear', level=0, prob=1., img_fill_val=img_fill_val, seg_ignore_label=seg_ignore_label, direction='vertical') shear_module = build_from_cfg(transform, PIPELINES) results_wo_shear = shear_module(copy.deepcopy(results)) check_result_same(results, results_wo_shear) # test case when no shear aug (prob<=0) transform = dict( type='Shear', level=10, prob=0., img_fill_val=img_fill_val, direction='vertical') shear_module = build_from_cfg(transform, PIPELINES) results_wo_shear = shear_module(copy.deepcopy(results)) check_result_same(results, results_wo_shear) # test shear horizontally, magnitude=1 transform = dict( type='Shear', level=10, prob=1., img_fill_val=img_fill_val, direction='horizontal', max_shear_magnitude=1., random_negative_prob=0.)
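    # Added note (illustrative, not from the upstream test): a horizontal
    # shear with magnitude m maps (x, y) -> (x + m * y, y), i.e. row y of
    # the image shifts right by m * y pixels. With m = 1 on the 2x4 toy
    # image, row 0 stays put and row 1 shifts by one pixel, which is what
    # the expected `img_s` below encodes (the vacated pixel at (1, 0) is
    # filled with `img_fill_val`).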
shear_module = build_from_cfg(transform, PIPELINES) results_sheared = shear_module(copy.deepcopy(results)) results_gt = copy.deepcopy(results) img_s = np.array([[1, 2, 3, 4], [0, 5, 6, 7]], dtype=np.uint8) img_s = np.stack([img_s, img_s, img_s], axis=-1) img_s[1, 0, :] = np.array(img_fill_val) results_gt['img'] = img_s results_gt['gt_bboxes'] = np.array([[0., 0., 3., 1.]], dtype=np.float32) results_gt['gt_bboxes_ignore'] = np.array([[2., 0., 4., 1.]], dtype=np.float32) gt_masks = np.array([[0, 1, 1, 0], [0, 0, 1, 0]], dtype=np.uint8)[None, :, :] results_gt['gt_masks'] = BitmapMasks(gt_masks, 2, 4) results_gt['gt_semantic_seg'] = np.array( [[1, 2, 3, 4], [255, 5, 6, 7]], dtype=results['gt_semantic_seg'].dtype) check_result_same(results_gt, results_sheared) # test PolygonMasks with shear horizontally, magnitude=1 results = construct_toy_data(poly2mask=False) results_sheared = shear_module(copy.deepcopy(results)) print(results_sheared['gt_masks']) gt_masks = [[np.array([0, 0, 2, 0, 3, 1, 1, 1], dtype=np.float)]] results_gt['gt_masks'] = PolygonMasks(gt_masks, 2, 4) check_result_same(results_gt, results_sheared) # test shear vertically, magnitude=-1 img_fill_val = 128 results = construct_toy_data() transform = dict( type='Shear', level=10, prob=1., img_fill_val=img_fill_val, direction='vertical', max_shear_magnitude=1., random_negative_prob=1.) shear_module = build_from_cfg(transform, PIPELINES) results_sheared = shear_module(copy.deepcopy(results)) results_gt = copy.deepcopy(results) img_s = np.array([[1, 6, img_fill_val, img_fill_val], [5, img_fill_val, img_fill_val, img_fill_val]], dtype=np.uint8) img_s = np.stack([img_s, img_s, img_s], axis=-1) results_gt['img'] = img_s results_gt['gt_bboxes'] = np.empty((0, 4), dtype=np.float32) results_gt['gt_labels'] = np.empty((0, ), dtype=np.int64) results_gt['gt_bboxes_ignore'] = np.empty((0, 4), dtype=np.float32) gt_masks = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.uint8)[None, :, :] results_gt['gt_masks'] = BitmapMasks(gt_masks, 2, 4) results_gt['gt_semantic_seg'] = np.array( [[1, 6, 255, 255], [5, 255, 255, 255]], dtype=results['gt_semantic_seg'].dtype) check_result_same(results_gt, results_sheared) # test PolygonMasks with shear vertically, magnitude=-1 results = construct_toy_data(poly2mask=False) results_sheared = shear_module(copy.deepcopy(results)) gt_masks = [[np.array([0, 0, 2, 0, 2, 0, 0, 1], dtype=np.float)]] results_gt['gt_masks'] = PolygonMasks(gt_masks, 2, 4) check_result_same(results_gt, results_sheared) results = construct_toy_data() # same mask for BitmapMasks and PolygonMasks results['gt_masks'] = BitmapMasks( np.array([[0, 1, 1, 0], [0, 1, 1, 0]], dtype=np.uint8)[None, :, :], 2, 4) results['gt_bboxes'] = np.array([[1., 0., 2., 1.]], dtype=np.float32) results_sheared_bitmap = shear_module(copy.deepcopy(results)) check_result_same(results_sheared_bitmap, results_sheared) # test AutoAugment equipped with Shear policies = [[dict(type='Shear', level=10, prob=1.)]] autoaug = dict(type='AutoAugment', policies=policies) autoaug_module = build_from_cfg(autoaug, PIPELINES) autoaug_module(copy.deepcopy(results)) policies = [[ dict(type='Shear', level=10, prob=1.), dict( type='Shear', level=8, img_fill_val=img_fill_val, direction='vertical', max_shear_magnitude=1.) 
]] autoaug = dict(type='AutoAugment', policies=policies) autoaug_module = build_from_cfg(autoaug, PIPELINES) autoaug_module(copy.deepcopy(results)) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_pipelines/test_transform/test_transform.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import copy import os.path as osp import mmcv import numpy as np import pytest import torch from mmcv.utils import build_from_cfg from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps from mmdet.datasets.builder import PIPELINES from .utils import create_full_masks, create_random_bboxes def test_resize(): # test assertion if img_scale is a list with pytest.raises(AssertionError): transform = dict(type='Resize', img_scale=[1333, 800], keep_ratio=True) build_from_cfg(transform, PIPELINES) # test assertion if len(img_scale) while ratio_range is not None with pytest.raises(AssertionError): transform = dict( type='Resize', img_scale=[(1333, 800), (1333, 600)], ratio_range=(0.9, 1.1), keep_ratio=True) build_from_cfg(transform, PIPELINES) # test assertion for invalid multiscale_mode with pytest.raises(AssertionError): transform = dict( type='Resize', img_scale=[(1333, 800), (1333, 600)], keep_ratio=True, multiscale_mode='2333') build_from_cfg(transform, PIPELINES) # test assertion if both scale and scale_factor are set with pytest.raises(AssertionError): results = dict( img_prefix=osp.join(osp.dirname(__file__), '../../../data'), img_info=dict(filename='color.jpg')) load = dict(type='LoadImageFromFile') load = build_from_cfg(load, PIPELINES) transform = dict(type='Resize', img_scale=(1333, 800), keep_ratio=True) transform = build_from_cfg(transform, PIPELINES) results = load(results) results['scale'] = (1333, 800) results['scale_factor'] = 1.0 results = transform(results) transform = dict(type='Resize', img_scale=(1333, 800), keep_ratio=True) resize_module = build_from_cfg(transform, PIPELINES) results = dict() img = mmcv.imread( osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color') results['img'] = img results['img2'] = copy.deepcopy(img) results['img_shape'] = img.shape results['ori_shape'] = img.shape # Set initial values for default meta_keys results['pad_shape'] = img.shape results['img_fields'] = ['img', 'img2'] results = resize_module(results) assert np.equal(results['img'], results['img2']).all() results.pop('scale') results.pop('scale_factor') transform = dict( type='Resize', img_scale=(1280, 800), multiscale_mode='value', keep_ratio=False) resize_module = build_from_cfg(transform, PIPELINES) results = resize_module(results) assert np.equal(results['img'], results['img2']).all() assert results['img_shape'] == (800, 1280, 3) assert results['img'].dtype == results['img'].dtype == np.uint8 results_seg = { 'img': img, 'img_shape': img.shape, 'ori_shape': img.shape, 'gt_semantic_seg': copy.deepcopy(img), 'gt_seg': copy.deepcopy(img), 'seg_fields': ['gt_semantic_seg', 'gt_seg'] } transform = dict( type='Resize', img_scale=(640, 400), multiscale_mode='value', keep_ratio=False) resize_module = build_from_cfg(transform, PIPELINES) results_seg = resize_module(results_seg) assert results_seg['gt_semantic_seg'].shape == results_seg['gt_seg'].shape assert results_seg['img_shape'] == (400, 640, 3) assert results_seg['img_shape'] != results_seg['ori_shape'] assert results_seg['gt_semantic_seg'].shape == results_seg['img_shape'] assert np.equal(results_seg['gt_semantic_seg'], 
results_seg['gt_seg']).all() def test_flip(): # test assertion for invalid flip_ratio with pytest.raises(AssertionError): transform = dict(type='RandomFlip', flip_ratio=1.5) build_from_cfg(transform, PIPELINES) # test assertion for 0 <= sum(flip_ratio) <= 1 with pytest.raises(AssertionError): transform = dict( type='RandomFlip', flip_ratio=[0.7, 0.8], direction=['horizontal', 'vertical']) build_from_cfg(transform, PIPELINES) # test assertion for mismatch between number of flip_ratio and direction with pytest.raises(AssertionError): transform = dict(type='RandomFlip', flip_ratio=[0.4, 0.5]) build_from_cfg(transform, PIPELINES) # test assertion for invalid direction with pytest.raises(AssertionError): transform = dict( type='RandomFlip', flip_ratio=1., direction='horizonta') build_from_cfg(transform, PIPELINES) transform = dict(type='RandomFlip', flip_ratio=1.) flip_module = build_from_cfg(transform, PIPELINES) results = dict() img = mmcv.imread( osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color') original_img = copy.deepcopy(img) results['img'] = img results['img2'] = copy.deepcopy(img) results['img_shape'] = img.shape results['ori_shape'] = img.shape # Set initial values for default meta_keys results['pad_shape'] = img.shape results['scale_factor'] = 1.0 results['img_fields'] = ['img', 'img2'] results = flip_module(results) assert np.equal(results['img'], results['img2']).all() flip_module = build_from_cfg(transform, PIPELINES) results = flip_module(results) assert np.equal(results['img'], results['img2']).all() assert np.equal(original_img, results['img']).all() # test flip_ratio is float, direction is list transform = dict( type='RandomFlip', flip_ratio=0.9, direction=['horizontal', 'vertical', 'diagonal']) flip_module = build_from_cfg(transform, PIPELINES) results = dict() img = mmcv.imread( osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color') original_img = copy.deepcopy(img) results['img'] = img results['img_shape'] = img.shape results['ori_shape'] = img.shape # Set initial values for default meta_keys results['pad_shape'] = img.shape results['scale_factor'] = 1.0 results['img_fields'] = ['img'] results = flip_module(results) if results['flip']: assert np.array_equal( mmcv.imflip(original_img, results['flip_direction']), results['img']) else: assert np.array_equal(original_img, results['img']) # test flip_ratio is list, direction is list transform = dict( type='RandomFlip', flip_ratio=[0.3, 0.3, 0.2], direction=['horizontal', 'vertical', 'diagonal']) flip_module = build_from_cfg(transform, PIPELINES) results = dict() img = mmcv.imread( osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color') original_img = copy.deepcopy(img) results['img'] = img results['img_shape'] = img.shape results['ori_shape'] = img.shape # Set initial values for default meta_keys results['pad_shape'] = img.shape results['scale_factor'] = 1.0 results['img_fields'] = ['img'] results = flip_module(results) if results['flip']: assert np.array_equal( mmcv.imflip(original_img, results['flip_direction']), results['img']) else: assert np.array_equal(original_img, results['img']) def test_random_crop(): # test assertion for invalid random crop with pytest.raises(AssertionError): transform = dict(type='RandomCrop', crop_size=(-1, 0)) build_from_cfg(transform, PIPELINES) results = dict() img = mmcv.imread( osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color') results['img'] = img results['img_shape'] = img.shape results['ori_shape'] = img.shape # TODO: add 
img_fields test results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore'] # Set initial values for default meta_keys results['pad_shape'] = img.shape results['scale_factor'] = 1.0 h, w, _ = img.shape gt_bboxes = create_random_bboxes(8, w, h) gt_bboxes_ignore = create_random_bboxes(2, w, h) results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64) results['gt_bboxes'] = gt_bboxes results['gt_bboxes_ignore'] = gt_bboxes_ignore transform = dict(type='RandomCrop', crop_size=(h - 20, w - 20)) crop_module = build_from_cfg(transform, PIPELINES) results = crop_module(results) assert results['img'].shape[:2] == (h - 20, w - 20) # All bboxes should be reserved after crop assert results['img_shape'][:2] == (h - 20, w - 20) assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0] assert results['gt_labels'].dtype == np.int64 assert results['gt_bboxes'].dtype == np.float32 assert results['gt_bboxes'].shape[0] == 8 assert results['gt_bboxes_ignore'].shape[0] == 2 def area(bboxes): return np.prod(bboxes[:, 2:4] - bboxes[:, 0:2], axis=1) assert (area(results['gt_bboxes']) <= area(gt_bboxes)).all() assert (area(results['gt_bboxes_ignore']) <= area(gt_bboxes_ignore)).all() assert results['gt_bboxes'].dtype == np.float32 assert results['gt_bboxes_ignore'].dtype == np.float32 # test assertion for invalid crop_type with pytest.raises(ValueError): transform = dict( type='RandomCrop', crop_size=(1, 1), crop_type='unknown') build_from_cfg(transform, PIPELINES) # test assertion for invalid crop_size with pytest.raises(AssertionError): transform = dict( type='RandomCrop', crop_type='relative', crop_size=(0, 0)) build_from_cfg(transform, PIPELINES) def _construct_toy_data(): img = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.uint8) img = np.stack([img, img, img], axis=-1) results = dict() # image results['img'] = img results['img_shape'] = img.shape results['img_fields'] = ['img'] # bboxes results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore'] results['gt_bboxes'] = np.array([[0., 0., 2., 1.]], dtype=np.float32) results['gt_bboxes_ignore'] = np.array([[2., 0., 3., 1.]], dtype=np.float32) # labels results['gt_labels'] = np.array([1], dtype=np.int64) return results # test crop_type "relative_range" results = _construct_toy_data() transform = dict( type='RandomCrop', crop_type='relative_range', crop_size=(0.3, 0.7), allow_negative_crop=True) transform_module = build_from_cfg(transform, PIPELINES) results_transformed = transform_module(copy.deepcopy(results)) h, w = results_transformed['img_shape'][:2] assert int(2 * 0.3 + 0.5) <= h <= int(2 * 1 + 0.5) assert int(4 * 0.7 + 0.5) <= w <= int(4 * 1 + 0.5) assert results_transformed['gt_bboxes'].dtype == np.float32 assert results_transformed['gt_bboxes_ignore'].dtype == np.float32 # test crop_type "relative" transform = dict( type='RandomCrop', crop_type='relative', crop_size=(0.3, 0.7), allow_negative_crop=True) transform_module = build_from_cfg(transform, PIPELINES) results_transformed = transform_module(copy.deepcopy(results)) h, w = results_transformed['img_shape'][:2] assert h == int(2 * 0.3 + 0.5) and w == int(4 * 0.7 + 0.5) assert results_transformed['gt_bboxes'].dtype == np.float32 assert results_transformed['gt_bboxes_ignore'].dtype == np.float32 # test crop_type "absolute" transform = dict( type='RandomCrop', crop_type='absolute', crop_size=(1, 2), allow_negative_crop=True) transform_module = build_from_cfg(transform, PIPELINES) results_transformed = transform_module(copy.deepcopy(results)) h, w = results_transformed['img_shape'][:2] 
assert h == 1 and w == 2 assert results_transformed['gt_bboxes'].dtype == np.float32 assert results_transformed['gt_bboxes_ignore'].dtype == np.float32 # test crop_type "absolute_range" transform = dict( type='RandomCrop', crop_type='absolute_range', crop_size=(1, 20), allow_negative_crop=True) transform_module = build_from_cfg(transform, PIPELINES) results_transformed = transform_module(copy.deepcopy(results)) h, w = results_transformed['img_shape'][:2] assert 1 <= h <= 2 and 1 <= w <= 4 assert results_transformed['gt_bboxes'].dtype == np.float32 assert results_transformed['gt_bboxes_ignore'].dtype == np.float32 def test_min_iou_random_crop(): results = dict() img = mmcv.imread( osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color') results['img'] = img results['img_shape'] = img.shape results['ori_shape'] = img.shape results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore'] # Set initial values for default meta_keys results['pad_shape'] = img.shape results['scale_factor'] = 1.0 h, w, _ = img.shape gt_bboxes = create_random_bboxes(1, w, h) gt_bboxes_ignore = create_random_bboxes(1, w, h) results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64) results['gt_bboxes'] = gt_bboxes results['gt_bboxes_ignore'] = gt_bboxes_ignore transform = dict(type='MinIoURandomCrop') crop_module = build_from_cfg(transform, PIPELINES) # Test for img_fields results_test = copy.deepcopy(results) results_test['img1'] = results_test['img'] results_test['img_fields'] = ['img', 'img1'] with pytest.raises(AssertionError): crop_module(results_test) results = crop_module(results) assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0] assert results['gt_labels'].dtype == np.int64 assert results['gt_bboxes'].dtype == np.float32 assert results['gt_bboxes_ignore'].dtype == np.float32 patch = np.array([0, 0, results['img_shape'][1], results['img_shape'][0]]) ious = bbox_overlaps(patch.reshape(-1, 4), results['gt_bboxes']).reshape(-1) ious_ignore = bbox_overlaps( patch.reshape(-1, 4), results['gt_bboxes_ignore']).reshape(-1) mode = crop_module.mode if mode == 1: assert np.equal(results['gt_bboxes'], gt_bboxes).all() assert np.equal(results['gt_bboxes_ignore'], gt_bboxes_ignore).all() else: assert (ious >= mode).all() assert (ious_ignore >= mode).all() def test_pad(): # test assertion if both size_divisor and size is None with pytest.raises(AssertionError): transform = dict(type='Pad') build_from_cfg(transform, PIPELINES) transform = dict(type='Pad', size_divisor=32) transform = build_from_cfg(transform, PIPELINES) results = dict() img = mmcv.imread( osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color') original_img = copy.deepcopy(img) results['img'] = img results['img2'] = copy.deepcopy(img) results['img_shape'] = img.shape results['ori_shape'] = img.shape # Set initial values for default meta_keys results['pad_shape'] = img.shape results['scale_factor'] = 1.0 results['img_fields'] = ['img', 'img2'] results = transform(results) assert np.equal(results['img'], results['img2']).all() # original img already divisible by 32 assert np.equal(results['img'], original_img).all() img_shape = results['img'].shape assert img_shape[0] % 32 == 0 assert img_shape[1] % 32 == 0 resize_transform = dict( type='Resize', img_scale=(1333, 800), keep_ratio=True) resize_module = build_from_cfg(resize_transform, PIPELINES) results = resize_module(results) results = transform(results) img_shape = results['img'].shape assert np.equal(results['img'], results['img2']).all() assert img_shape[0] % 
32 == 0 assert img_shape[1] % 32 == 0 # test the size and size_divisor must be None when pad2square is True with pytest.raises(AssertionError): transform = dict(type='Pad', size_divisor=32, pad_to_square=True) build_from_cfg(transform, PIPELINES) transform = dict(type='Pad', pad_to_square=True) transform = build_from_cfg(transform, PIPELINES) results['img'] = img results = transform(results) assert results['img'].shape[0] == results['img'].shape[1] # test the pad_val is converted to a dict transform = dict(type='Pad', size_divisor=32, pad_val=0) with pytest.deprecated_call(): transform = build_from_cfg(transform, PIPELINES) assert isinstance(transform.pad_val, dict) results = transform(results) img_shape = results['img'].shape assert img_shape[0] % 32 == 0 assert img_shape[1] % 32 == 0 def test_normalize(): img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) transform = dict(type='Normalize', **img_norm_cfg) transform = build_from_cfg(transform, PIPELINES) results = dict() img = mmcv.imread( osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color') original_img = copy.deepcopy(img) results['img'] = img results['img2'] = copy.deepcopy(img) results['img_shape'] = img.shape results['ori_shape'] = img.shape # Set initial values for default meta_keys results['pad_shape'] = img.shape results['scale_factor'] = 1.0 results['img_fields'] = ['img', 'img2'] results = transform(results) assert np.equal(results['img'], results['img2']).all() mean = np.array(img_norm_cfg['mean']) std = np.array(img_norm_cfg['std']) converted_img = (original_img[..., ::-1] - mean) / std assert np.allclose(results['img'], converted_img) def test_albu_transform(): results = dict( img_prefix=osp.join(osp.dirname(__file__), '../../../data'), img_info=dict(filename='color.jpg')) # Define simple pipeline load = dict(type='LoadImageFromFile') load = build_from_cfg(load, PIPELINES) albu_transform = dict( type='Albu', transforms=[dict(type='ChannelShuffle', p=1)]) albu_transform = build_from_cfg(albu_transform, PIPELINES) normalize = dict(type='Normalize', mean=[0] * 3, std=[0] * 3, to_rgb=True) normalize = build_from_cfg(normalize, PIPELINES) # Execute transforms results = load(results) results = albu_transform(results) results = normalize(results) assert results['img'].dtype == np.float32 def test_random_center_crop_pad(): # test assertion for invalid crop_size while test_mode=False with pytest.raises(AssertionError): transform = dict( type='RandomCenterCropPad', crop_size=(-1, 0), test_mode=False, test_pad_mode=None) build_from_cfg(transform, PIPELINES) # test assertion for invalid ratios while test_mode=False with pytest.raises(AssertionError): transform = dict( type='RandomCenterCropPad', crop_size=(511, 511), ratios=(1.0), test_mode=False, test_pad_mode=None) build_from_cfg(transform, PIPELINES) # test assertion for invalid mean, std and to_rgb with pytest.raises(AssertionError): transform = dict( type='RandomCenterCropPad', crop_size=(511, 511), mean=None, std=None, to_rgb=None, test_mode=False, test_pad_mode=None) build_from_cfg(transform, PIPELINES) # test assertion for invalid crop_size while test_mode=True with pytest.raises(AssertionError): transform = dict( type='RandomCenterCropPad', crop_size=(511, 511), ratios=None, border=None, mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True, test_mode=True, test_pad_mode=('logical_or', 127)) build_from_cfg(transform, PIPELINES) # test assertion for invalid ratios while test_mode=True with 
pytest.raises(AssertionError): transform = dict( type='RandomCenterCropPad', crop_size=None, ratios=(0.9, 1.0, 1.1), border=None, mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True, test_mode=True, test_pad_mode=('logical_or', 127)) build_from_cfg(transform, PIPELINES) # test assertion for invalid border while test_mode=True with pytest.raises(AssertionError): transform = dict( type='RandomCenterCropPad', crop_size=None, ratios=None, border=128, mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True, test_mode=True, test_pad_mode=('logical_or', 127)) build_from_cfg(transform, PIPELINES) # test assertion for invalid test_pad_mode while test_mode=True with pytest.raises(AssertionError): transform = dict( type='RandomCenterCropPad', crop_size=None, ratios=None, border=None, mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True, test_mode=True, test_pad_mode=('do_nothing', 100)) build_from_cfg(transform, PIPELINES) results = dict( img_prefix=osp.join(osp.dirname(__file__), '../../../data'), img_info=dict(filename='color.jpg')) load = dict(type='LoadImageFromFile', to_float32=True) load = build_from_cfg(load, PIPELINES) results = load(results) test_results = copy.deepcopy(results) h, w, _ = results['img_shape'] gt_bboxes = create_random_bboxes(8, w, h) gt_bboxes_ignore = create_random_bboxes(2, w, h) results['gt_bboxes'] = gt_bboxes results['gt_bboxes_ignore'] = gt_bboxes_ignore train_transform = dict( type='RandomCenterCropPad', crop_size=(h - 20, w - 20), ratios=(1.0, ), border=128, mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True, test_mode=False, test_pad_mode=None) crop_module = build_from_cfg(train_transform, PIPELINES) train_results = crop_module(results) assert train_results['img'].shape[:2] == (h - 20, w - 20) # All bboxes should be reserved after crop assert train_results['pad_shape'][:2] == (h - 20, w - 20) assert train_results['gt_bboxes'].shape[0] == 8 assert train_results['gt_bboxes_ignore'].shape[0] == 2 assert train_results['gt_bboxes'].dtype == np.float32 assert train_results['gt_bboxes_ignore'].dtype == np.float32 test_transform = dict( type='RandomCenterCropPad', crop_size=None, ratios=None, border=None, mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True, test_mode=True, test_pad_mode=('logical_or', 127)) crop_module = build_from_cfg(test_transform, PIPELINES) test_results = crop_module(test_results) assert test_results['img'].shape[:2] == (h | 127, w | 127) assert test_results['pad_shape'][:2] == (h | 127, w | 127) assert 'border' in test_results def test_multi_scale_flip_aug(): # test assertion if give both scale_factor and img_scale with pytest.raises(AssertionError): transform = dict( type='MultiScaleFlipAug', scale_factor=1.0, img_scale=[(1333, 800)], transforms=[dict(type='Resize')]) build_from_cfg(transform, PIPELINES) # test assertion if both scale_factor and img_scale are None with pytest.raises(AssertionError): transform = dict( type='MultiScaleFlipAug', scale_factor=None, img_scale=None, transforms=[dict(type='Resize')]) build_from_cfg(transform, PIPELINES) # test assertion if img_scale is not tuple or list of tuple with pytest.raises(AssertionError): transform = dict( type='MultiScaleFlipAug', img_scale=[1333, 800], transforms=[dict(type='Resize')]) build_from_cfg(transform, PIPELINES) # test assertion if flip_direction is not str or list of str with pytest.raises(AssertionError): transform = dict( type='MultiScaleFlipAug', img_scale=[(1333, 800)], 
flip_direction=1, transforms=[dict(type='Resize')]) build_from_cfg(transform, PIPELINES) scale_transform = dict( type='MultiScaleFlipAug', img_scale=[(1333, 800), (1333, 640)], transforms=[dict(type='Resize', keep_ratio=True)]) transform = build_from_cfg(scale_transform, PIPELINES) results = dict() img = mmcv.imread( osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color') results['img'] = img results['img_shape'] = img.shape results['ori_shape'] = img.shape # Set initial values for default meta_keys results['pad_shape'] = img.shape results['img_fields'] = ['img'] scale_results = transform(copy.deepcopy(results)) assert len(scale_results['img']) == 2 assert scale_results['img'][0].shape == (750, 1333, 3) assert scale_results['img_shape'][0] == (750, 1333, 3) assert scale_results['img'][1].shape == (640, 1138, 3) assert scale_results['img_shape'][1] == (640, 1138, 3) scale_factor_transform = dict( type='MultiScaleFlipAug', scale_factor=[0.8, 1.0, 1.2], transforms=[dict(type='Resize', keep_ratio=False)]) transform = build_from_cfg(scale_factor_transform, PIPELINES) scale_factor_results = transform(copy.deepcopy(results)) assert len(scale_factor_results['img']) == 3 assert scale_factor_results['img'][0].shape == (230, 409, 3) assert scale_factor_results['img_shape'][0] == (230, 409, 3) assert scale_factor_results['img'][1].shape == (288, 512, 3) assert scale_factor_results['img_shape'][1] == (288, 512, 3) assert scale_factor_results['img'][2].shape == (345, 614, 3) assert scale_factor_results['img_shape'][2] == (345, 614, 3) # test pipeline of coco_detection results = dict( img_prefix=osp.join(osp.dirname(__file__), '../../../data'), img_info=dict(filename='color.jpg')) load_cfg, multi_scale_cfg = mmcv.Config.fromfile( 'configs/_base_/datasets/coco_detection.py').test_pipeline load = build_from_cfg(load_cfg, PIPELINES) transform = build_from_cfg(multi_scale_cfg, PIPELINES) results = transform(load(results)) assert len(results['img']) == 1 assert len(results['img_metas']) == 1 assert isinstance(results['img'][0], torch.Tensor) assert isinstance(results['img_metas'][0], mmcv.parallel.DataContainer) assert results['img_metas'][0].data['ori_shape'] == (288, 512, 3) assert results['img_metas'][0].data['img_shape'] == (750, 1333, 3) assert results['img_metas'][0].data['pad_shape'] == (768, 1344, 3) assert results['img_metas'][0].data['scale_factor'].tolist() == [ 2.603515625, 2.6041667461395264, 2.603515625, 2.6041667461395264 ] def test_cutout(): # test n_holes with pytest.raises(AssertionError): transform = dict(type='CutOut', n_holes=(5, 3), cutout_shape=(8, 8)) build_from_cfg(transform, PIPELINES) with pytest.raises(AssertionError): transform = dict(type='CutOut', n_holes=(3, 4, 5), cutout_shape=(8, 8)) build_from_cfg(transform, PIPELINES) # test cutout_shape and cutout_ratio with pytest.raises(AssertionError): transform = dict(type='CutOut', n_holes=1, cutout_shape=8) build_from_cfg(transform, PIPELINES) with pytest.raises(AssertionError): transform = dict(type='CutOut', n_holes=1, cutout_ratio=0.2) build_from_cfg(transform, PIPELINES) # either of cutout_shape and cutout_ratio should be given with pytest.raises(AssertionError): transform = dict(type='CutOut', n_holes=1) build_from_cfg(transform, PIPELINES) with pytest.raises(AssertionError): transform = dict( type='CutOut', n_holes=1, cutout_shape=(2, 2), cutout_ratio=(0.4, 0.4)) build_from_cfg(transform, PIPELINES) results = dict() img = mmcv.imread( osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color') 
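# CutOut paints n_holes random rectangles with fill_in (black by default),
# so with the default fill the pixel sum should drop below the original
# image's sum, while fill_in=(255, 255, 255) should push it above; the
# assertions that follow check both directions.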
results['img'] = img results['img_shape'] = img.shape results['ori_shape'] = img.shape results['pad_shape'] = img.shape results['img_fields'] = ['img'] transform = dict(type='CutOut', n_holes=1, cutout_shape=(10, 10)) cutout_module = build_from_cfg(transform, PIPELINES) cutout_result = cutout_module(copy.deepcopy(results)) assert cutout_result['img'].sum() < img.sum() transform = dict(type='CutOut', n_holes=1, cutout_ratio=(0.8, 0.8)) cutout_module = build_from_cfg(transform, PIPELINES) cutout_result = cutout_module(copy.deepcopy(results)) assert cutout_result['img'].sum() < img.sum() transform = dict( type='CutOut', n_holes=(2, 4), cutout_shape=[(10, 10), (15, 15)], fill_in=(255, 255, 255)) cutout_module = build_from_cfg(transform, PIPELINES) cutout_result = cutout_module(copy.deepcopy(results)) assert cutout_result['img'].sum() > img.sum() transform = dict( type='CutOut', n_holes=1, cutout_ratio=(0.8, 0.8), fill_in=(255, 255, 255)) cutout_module = build_from_cfg(transform, PIPELINES) cutout_result = cutout_module(copy.deepcopy(results)) assert cutout_result['img'].sum() > img.sum() def test_random_shift(): # test assertion for invalid shift_ratio with pytest.raises(AssertionError): transform = dict(type='RandomShift', shift_ratio=1.5) build_from_cfg(transform, PIPELINES) # test assertion for invalid max_shift_px with pytest.raises(AssertionError): transform = dict(type='RandomShift', max_shift_px=-1) build_from_cfg(transform, PIPELINES) results = dict() img = mmcv.imread( osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color') results['img'] = img # TODO: add img_fields test results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore'] h, w, _ = img.shape gt_bboxes = create_random_bboxes(8, w, h) gt_bboxes_ignore = create_random_bboxes(2, w, h) results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64) results['gt_bboxes'] = gt_bboxes results['gt_bboxes_ignore'] = gt_bboxes_ignore transform = dict(type='RandomShift', shift_ratio=1.0) random_shift_module = build_from_cfg(transform, PIPELINES) results = random_shift_module(results) assert results['img'].shape[:2] == (h, w) assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0] assert results['gt_labels'].dtype == np.int64 assert results['gt_bboxes'].dtype == np.float32 assert results['gt_bboxes_ignore'].dtype == np.float32 def test_random_affine(): # test assertion for invalid translate_ratio with pytest.raises(AssertionError): transform = dict(type='RandomAffine', max_translate_ratio=1.5) build_from_cfg(transform, PIPELINES) # test assertion for invalid scaling_ratio_range with pytest.raises(AssertionError): transform = dict(type='RandomAffine', scaling_ratio_range=(1.5, 0.5)) build_from_cfg(transform, PIPELINES) with pytest.raises(AssertionError): transform = dict(type='RandomAffine', scaling_ratio_range=(0, 0.5)) build_from_cfg(transform, PIPELINES) results = dict() img = mmcv.imread( osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color') results['img'] = img results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore'] h, w, _ = img.shape gt_bboxes = create_random_bboxes(8, w, h) gt_bboxes_ignore = create_random_bboxes(2, w, h) results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64) results['gt_bboxes'] = gt_bboxes results['gt_bboxes_ignore'] = gt_bboxes_ignore transform = dict(type='RandomAffine') random_affine_module = build_from_cfg(transform, PIPELINES) results = random_affine_module(results) assert results['img'].shape[:2] == (h, w) assert results['gt_labels'].shape[0] == 
results['gt_bboxes'].shape[0] assert results['gt_labels'].dtype == np.int64 assert results['gt_bboxes'].dtype == np.float32 assert results['gt_bboxes_ignore'].dtype == np.float32 # test filter bbox gt_bboxes = np.array([[0, 0, 1, 1], [0, 0, 3, 100]], dtype=np.float32) results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64) results['gt_bboxes'] = gt_bboxes transform = dict( type='RandomAffine', max_rotate_degree=0., max_translate_ratio=0., scaling_ratio_range=(1., 1.), max_shear_degree=0., border=(0, 0), min_bbox_size=2, max_aspect_ratio=20, skip_filter=False) random_affine_module = build_from_cfg(transform, PIPELINES) results = random_affine_module(results) assert results['gt_bboxes'].shape[0] == 0 assert results['gt_labels'].shape[0] == 0 assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0] assert results['gt_labels'].dtype == np.int64 assert results['gt_bboxes'].dtype == np.float32 assert results['gt_bboxes_ignore'].dtype == np.float32 def test_mosaic(): # test assertion for invalid img_scale with pytest.raises(AssertionError): transform = dict(type='Mosaic', img_scale=640) build_from_cfg(transform, PIPELINES) # test assertion for invalid probability with pytest.raises(AssertionError): transform = dict(type='Mosaic', prob=1.5) build_from_cfg(transform, PIPELINES) results = dict() img = mmcv.imread( osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color') results['img'] = img # TODO: add img_fields test results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore'] h, w, _ = img.shape gt_bboxes = create_random_bboxes(8, w, h) gt_bboxes_ignore = create_random_bboxes(2, w, h) results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64) results['gt_bboxes'] = gt_bboxes results['gt_bboxes_ignore'] = gt_bboxes_ignore transform = dict(type='Mosaic', img_scale=(10, 12)) mosaic_module = build_from_cfg(transform, PIPELINES) # test assertion for invalid mix_results with pytest.raises(AssertionError): mosaic_module(results) results['mix_results'] = [copy.deepcopy(results)] * 3 results = mosaic_module(results) assert results['img'].shape[:2] == (20, 24) assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0] assert results['gt_labels'].dtype == np.int64 assert results['gt_bboxes'].dtype == np.float32 assert results['gt_bboxes_ignore'].dtype == np.float32 def test_mixup(): # test assertion for invalid img_scale with pytest.raises(AssertionError): transform = dict(type='MixUp', img_scale=640) build_from_cfg(transform, PIPELINES) results = dict() img = mmcv.imread( osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color') results['img'] = img # TODO: add img_fields test results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore'] h, w, _ = img.shape gt_bboxes = create_random_bboxes(8, w, h) gt_bboxes_ignore = create_random_bboxes(2, w, h) results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64) results['gt_bboxes'] = gt_bboxes results['gt_bboxes_ignore'] = gt_bboxes_ignore transform = dict(type='MixUp', img_scale=(10, 12)) mixup_module = build_from_cfg(transform, PIPELINES) # test assertion for invalid mix_results with pytest.raises(AssertionError): mixup_module(results) with pytest.raises(AssertionError): results['mix_results'] = [copy.deepcopy(results)] * 2 mixup_module(results) results['mix_results'] = [copy.deepcopy(results)] results = mixup_module(results) assert results['img'].shape[:2] == (288, 512) assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0] assert results['gt_labels'].dtype == np.int64 assert 
results['gt_bboxes'].dtype == np.float32 assert results['gt_bboxes_ignore'].dtype == np.float32 # test filter bbox : # 2 boxes with sides 1 and 3 are filtered as min_bbox_size=5 gt_bboxes = np.array([[0, 0, 1, 1], [0, 0, 3, 3]], dtype=np.float32) results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64) results['gt_bboxes'] = gt_bboxes results['gt_bboxes_ignore'] = np.array([], dtype=np.float32) mixresults = results['mix_results'][0] mixresults['gt_labels'] = copy.deepcopy(results['gt_labels']) mixresults['gt_bboxes'] = copy.deepcopy(results['gt_bboxes']) mixresults['gt_bboxes_ignore'] = copy.deepcopy(results['gt_bboxes_ignore']) transform = dict( type='MixUp', img_scale=(10, 12), ratio_range=(1.5, 1.5), min_bbox_size=5, skip_filter=False) mixup_module = build_from_cfg(transform, PIPELINES) results = mixup_module(results) assert results['gt_bboxes'].shape[0] == 2 assert results['gt_labels'].shape[0] == 2 assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0] assert results['gt_labels'].dtype == np.int64 assert results['gt_bboxes'].dtype == np.float32 assert results['gt_bboxes_ignore'].dtype == np.float32 def test_photo_metric_distortion(): img = mmcv.imread( osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color') transform = dict(type='PhotoMetricDistortion') distortion_module = build_from_cfg(transform, PIPELINES) # test assertion for invalid img_fields with pytest.raises(AssertionError): results = dict() results['img'] = img results['img2'] = img results['img_fields'] = ['img', 'img2'] distortion_module(results) # test uint8 input results = dict() results['img'] = img results = distortion_module(results) assert results['img'].dtype == np.float32 # test float32 input results = dict() results['img'] = img.astype(np.float32) results = distortion_module(results) assert results['img'].dtype == np.float32 def test_copypaste(): dst_results, src_results = dict(), dict() img = mmcv.imread( osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color') dst_results['img'] = img.copy() src_results['img'] = img.copy() h, w, _ = img.shape dst_bboxes = np.array([[0.2 * w, 0.2 * h, 0.4 * w, 0.4 * h], [0.5 * w, 0.5 * h, 0.6 * w, 0.6 * h]], dtype=np.float32) src_bboxes = np.array([[0.1 * w, 0.1 * h, 0.3 * w, 0.5 * h], [0.4 * w, 0.4 * h, 0.7 * w, 0.7 * h], [0.8 * w, 0.8 * h, 0.9 * w, 0.9 * h]], dtype=np.float32) dst_labels = np.ones(dst_bboxes.shape[0], dtype=np.int64) src_labels = np.ones(src_bboxes.shape[0], dtype=np.int64) * 2 dst_masks = create_full_masks(dst_bboxes, w, h) src_masks = create_full_masks(src_bboxes, w, h) dst_results['gt_bboxes'] = dst_bboxes.copy() src_results['gt_bboxes'] = src_bboxes.copy() dst_results['gt_labels'] = dst_labels.copy() src_results['gt_labels'] = src_labels.copy() dst_results['gt_masks'] = copy.deepcopy(dst_masks) src_results['gt_masks'] = copy.deepcopy(src_masks) results = copy.deepcopy(dst_results) transform = dict(type='CopyPaste', selected=False) copypaste_module = build_from_cfg(transform, PIPELINES) # test assertion for invalid mix_results with pytest.raises(AssertionError): copypaste_module(results) results['mix_results'] = [copy.deepcopy(src_results)] results = copypaste_module(results) assert results['img'].shape[:2] == (h, w) # one object of destination image is totally occluded assert results['gt_bboxes'].shape[0] == \ dst_bboxes.shape[0] + src_bboxes.shape[0] - 1 assert results['gt_labels'].shape[0] == \ dst_labels.shape[0] + src_labels.shape[0] - 1 assert results['gt_masks'].masks.shape[0] == \ 
dst_masks.masks.shape[0] + src_masks.masks.shape[0] - 1 assert results['gt_labels'].dtype == np.int64 assert results['gt_bboxes'].dtype == np.float32 # the object of destination image is partially occluded ori_bbox = dst_bboxes[0] occ_bbox = results['gt_bboxes'][0] ori_mask = dst_masks.masks[0] occ_mask = results['gt_masks'].masks[0] assert ori_mask.sum() > occ_mask.sum() assert np.all(np.abs(occ_bbox - ori_bbox) <= copypaste_module.bbox_occluded_thr) or \ occ_mask.sum() > copypaste_module.mask_occluded_thr # test copypaste with selected objects transform = dict(type='CopyPaste') copypaste_module = build_from_cfg(transform, PIPELINES) results = copy.deepcopy(dst_results) results['mix_results'] = [copy.deepcopy(src_results)] copypaste_module(results) # test copypaste with an empty source image results = copy.deepcopy(dst_results) valid_inds = [False] * src_bboxes.shape[0] src_results['gt_bboxes'] = src_bboxes[valid_inds] src_results['gt_labels'] = src_labels[valid_inds] src_results['gt_masks'] = src_masks[valid_inds] results['mix_results'] = [copy.deepcopy(src_results)] copypaste_module(results) # test copy_paste based on bbox dst_results.pop('gt_masks') src_results.pop('gt_masks') dst_bboxes = dst_results['gt_bboxes'] src_bboxes = src_results['gt_bboxes'] dst_masks = create_full_masks(dst_bboxes, w, h) src_masks = create_full_masks(src_bboxes, w, h) results = copy.deepcopy(dst_results) results['mix_results'] = [copy.deepcopy(src_results)] results = copypaste_module(results) result_masks = create_full_masks(results['gt_bboxes'], w, h) result_masks_np = np.where(result_masks.to_ndarray().sum(0) > 0, 1, 0) masks_np = np.where( (src_masks.to_ndarray().sum(0) + dst_masks.to_ndarray().sum(0)) > 0, 1, 0) assert np.all(result_masks_np == masks_np) assert 'gt_masks' not in results ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_pipelines/test_transform/test_translate.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
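# The translation offset is never hard-coded below; each test reads it back
# from translate_module.offset. As a rough sketch (an assumption about
# mmdet's level_to_value helper, not a guarantee), the mapping is:
#
#     _MAX_LEVEL = 10
#     offset = int(level / _MAX_LEVEL * max_translate_offset)
#     # e.g. level=8 with the default max_translate_offset=250. gives 200 px
#
# Reading the realized offset keeps the checks valid even if this mapping
# changes.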
import copy import numpy as np import pycocotools.mask as maskUtils import pytest from mmcv.utils import build_from_cfg from mmdet.core.mask import BitmapMasks, PolygonMasks from mmdet.datasets.builder import PIPELINES def _check_keys(results, results_translated): assert len(set(results.keys()).difference(set( results_translated.keys()))) == 0 assert len(set(results_translated.keys()).difference(set( results.keys()))) == 0 def _pad(h, w, c, pad_val, axis=-1, dtype=np.float32): assert isinstance(pad_val, (int, float, tuple)) if isinstance(pad_val, (int, float)): pad_val = tuple([pad_val] * c) assert len(pad_val) == c pad_data = np.stack([np.ones((h, w)) * pad_val[i] for i in range(c)], axis=axis).astype(dtype) return pad_data def _construct_img(results): h, w = results['img_info']['height'], results['img_info']['width'] img = np.random.uniform(0, 1, (h, w, 3)) * 255 img = img.astype(np.uint8) results['img'] = img results['img_shape'] = img.shape results['ori_shape'] = img.shape results['img_fields'] = ['img'] def _construct_ann_info(h=427, w=640, c=3): bboxes = np.array( [[222.62, 217.82, 241.81, 238.93], [50.5, 329.7, 130.23, 384.96], [175.47, 331.97, 254.8, 389.26]], dtype=np.float32) labels = np.array([9, 2, 2], dtype=np.int64) bboxes_ignore = np.array([[59., 253., 311., 337.]], dtype=np.float32) masks = [ [[222.62, 217.82, 222.62, 238.93, 241.81, 238.93, 240.85, 218.78]], [[ 69.19, 332.17, 82.39, 330.25, 97.24, 329.7, 114.01, 331.35, 116.76, 337.39, 119.78, 343.17, 128.03, 344.54, 128.86, 347.84, 124.18, 350.59, 129.96, 358.01, 130.23, 366.54, 129.13, 377.81, 125.28, 382.48, 119.78, 381.93, 117.31, 377.54, 116.21, 379.46, 114.83, 382.21, 107.14, 383.31, 105.49, 378.36, 77.99, 377.54, 75.79, 381.11, 69.74, 381.93, 66.72, 378.91, 65.07, 377.81, 63.15, 379.19, 62.32, 383.31, 52.7, 384.96, 50.5, 379.46, 51.32, 375.61, 51.6, 370.11, 51.6, 364.06, 53.52, 354.99, 56.27, 344.54, 59.57, 336.29, 66.45, 332.72 ]], [[ 175.47, 386.86, 175.87, 376.44, 177.08, 351.2, 189.1, 332.77, 194.31, 331.97, 236.37, 332.77, 244.79, 342.39, 246.79, 346.79, 248.39, 345.99, 251.6, 345.59, 254.8, 348.0, 254.8, 351.6, 250.0, 352.0, 250.0, 354.81, 251.6, 358.41, 251.6, 364.42, 251.6, 370.03, 252.8, 378.04, 252.8, 384.05, 250.8, 387.26, 246.39, 387.66, 245.19, 386.46, 242.38, 388.86, 233.97, 389.26, 232.77, 388.06, 232.77, 383.65, 195.91, 381.25, 195.91, 384.86, 191.1, 384.86, 187.49, 385.26, 186.69, 382.85, 184.29, 382.45, 183.09, 387.26, 178.68, 388.46, 176.28, 387.66 ]] ] return dict( bboxes=bboxes, labels=labels, bboxes_ignore=bboxes_ignore, masks=masks) def _load_bboxes(results): ann_info = results['ann_info'] results['gt_bboxes'] = ann_info['bboxes'].copy() results['bbox_fields'] = ['gt_bboxes'] gt_bboxes_ignore = ann_info.get('bboxes_ignore', None) if gt_bboxes_ignore is not None: results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy() results['bbox_fields'].append('gt_bboxes_ignore') def _load_labels(results): results['gt_labels'] = results['ann_info']['labels'].copy() def _poly2mask(mask_ann, img_h, img_w): if isinstance(mask_ann, list): # polygon -- a single object might consist of multiple parts # we merge all parts into one mask rle code rles = maskUtils.frPyObjects(mask_ann, img_h, img_w) rle = maskUtils.merge(rles) elif isinstance(mask_ann['counts'], list): # uncompressed RLE rle = maskUtils.frPyObjects(mask_ann, img_h, img_w) else: # rle rle = mask_ann mask = maskUtils.decode(rle) return mask def _process_polygons(polygons): polygons = [np.array(p) for p in polygons] valid_polygons = [] for polygon in 
polygons: if len(polygon) % 2 == 0 and len(polygon) >= 6: valid_polygons.append(polygon) return valid_polygons def _load_masks(results, poly2mask=True): h, w = results['img_info']['height'], results['img_info']['width'] gt_masks = results['ann_info']['masks'] if poly2mask: gt_masks = BitmapMasks([_poly2mask(mask, h, w) for mask in gt_masks], h, w) else: gt_masks = PolygonMasks( [_process_polygons(polygons) for polygons in gt_masks], h, w) results['gt_masks'] = gt_masks results['mask_fields'] = ['gt_masks'] def _construct_semantic_seg(results): h, w = results['img_info']['height'], results['img_info']['width'] seg_toy = (np.random.uniform(0, 1, (h, w)) * 255).astype(np.uint8) results['gt_semantic_seg'] = seg_toy results['seg_fields'] = ['gt_semantic_seg'] def construct_toy_data(poly2mask=True): img_info = dict(height=427, width=640) ann_info = _construct_ann_info(h=img_info['height'], w=img_info['width']) results = dict(img_info=img_info, ann_info=ann_info) # construct image, similar to 'LoadImageFromFile' _construct_img(results) # 'LoadAnnotations' (bboxes, labels, masks, semantic_seg) _load_bboxes(results) _load_labels(results) _load_masks(results, poly2mask) _construct_semantic_seg(results) return results def test_translate(): # test assertion for invalid value of level with pytest.raises(AssertionError): transform = dict(type='Translate', level=-1) build_from_cfg(transform, PIPELINES) # test assertion for invalid type of level with pytest.raises(AssertionError): transform = dict(type='Translate', level=[1]) build_from_cfg(transform, PIPELINES) # test assertion for invalid prob with pytest.raises(AssertionError): transform = dict(type='Translate', level=1, prob=-0.5) build_from_cfg(transform, PIPELINES) # test assertion for the num of elements in tuple img_fill_val with pytest.raises(AssertionError): transform = dict( type='Translate', level=1, img_fill_val=(128, 128, 128, 128)) build_from_cfg(transform, PIPELINES) # test ValueError for invalid type of img_fill_val with pytest.raises(ValueError): transform = dict( type='Translate', level=1, img_fill_val=[128, 128, 128]) build_from_cfg(transform, PIPELINES) # test assertion for invalid value of img_fill_val with pytest.raises(AssertionError): transform = dict( type='Translate', level=1, img_fill_val=(128, -1, 256)) build_from_cfg(transform, PIPELINES) # test assertion for invalid value of direction with pytest.raises(AssertionError): transform = dict( type='Translate', level=1, img_fill_val=128, direction='diagonal') build_from_cfg(transform, PIPELINES) # test assertion for invalid type of max_translate_offset with pytest.raises(AssertionError): transform = dict( type='Translate', level=1, img_fill_val=128, max_translate_offset=(250., )) build_from_cfg(transform, PIPELINES) # construct toy data example for unit test results = construct_toy_data() def _check_bbox_mask(results, results_translated, offset, direction, min_size=0.): # The key correspondence from bboxes to labels and masks. 
bbox2label = { 'gt_bboxes': 'gt_labels', 'gt_bboxes_ignore': 'gt_labels_ignore' } bbox2mask = { 'gt_bboxes': 'gt_masks', 'gt_bboxes_ignore': 'gt_masks_ignore' } def _translate_bbox(bboxes, offset, direction, max_h, max_w): if direction == 'horizontal': bboxes[:, 0::2] = bboxes[:, 0::2] + offset elif direction == 'vertical': bboxes[:, 1::2] = bboxes[:, 1::2] + offset else: raise ValueError bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, max_w) bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, max_h) return bboxes h, w, c = results_translated['img'].shape for key in results_translated.get('bbox_fields', []): label_key, mask_key = bbox2label[key], bbox2mask[key] # check length of key if label_key in results: assert len(results_translated[key]) == len( results_translated[label_key]) if mask_key in results: assert len(results_translated[key]) == len( results_translated[mask_key]) # construct gt_bboxes gt_bboxes = _translate_bbox( copy.deepcopy(results[key]), offset, direction, h, w) valid_inds = (gt_bboxes[:, 2] - gt_bboxes[:, 0] > min_size) & ( gt_bboxes[:, 3] - gt_bboxes[:, 1] > min_size) gt_bboxes = gt_bboxes[valid_inds] # check bbox assert np.equal(gt_bboxes, results_translated[key]).all() # construct gt_masks if mask_key not in results: # e.g. 'gt_masks_ignore' continue masks, masks_translated = results[mask_key].to_ndarray( ), results_translated[mask_key].to_ndarray() assert masks.dtype == masks_translated.dtype if direction == 'horizontal': masks_pad = _pad( h, abs(offset), masks.shape[0], 0, axis=0, dtype=masks.dtype) if offset <= 0: # left shift gt_masks = np.concatenate( (masks[:, :, -offset:], masks_pad), axis=-1) else: # right shift gt_masks = np.concatenate( (masks_pad, masks[:, :, :-offset]), axis=-1) else: masks_pad = _pad( abs(offset), w, masks.shape[0], 0, axis=0, dtype=masks.dtype) if offset <= 0: # top shift gt_masks = np.concatenate( (masks[:, -offset:, :], masks_pad), axis=1) else: # bottom shift gt_masks = np.concatenate( (masks_pad, masks[:, :-offset, :]), axis=1) gt_masks = gt_masks[valid_inds] # check masks assert np.equal(gt_masks, masks_translated).all() def _check_img_seg(results, results_translated, keys, offset, fill_val, direction): for key in keys: assert isinstance(results_translated[key], type(results[key])) # assert type(results[key]) == type(results_translated[key]) data, data_translated = results[key], results_translated[key] if 'mask' in key: data, data_translated = data.to_ndarray( ), data_translated.to_ndarray() assert data.dtype == data_translated.dtype if 'img' in key: data, data_translated = data.transpose( (2, 0, 1)), data_translated.transpose((2, 0, 1)) elif 'seg' in key: data, data_translated = data[None, :, :], data_translated[ None, :, :] c, h, w = data.shape if direction == 'horizontal': data_pad = _pad( h, abs(offset), c, fill_val, axis=0, dtype=data.dtype) if offset <= 0: # left shift data_gt = np.concatenate((data[:, :, -offset:], data_pad), axis=-1) else: # right shift data_gt = np.concatenate((data_pad, data[:, :, :-offset]), axis=-1) else: data_pad = _pad( abs(offset), w, c, fill_val, axis=0, dtype=data.dtype) if offset <= 0: # top shift data_gt = np.concatenate((data[:, -offset:, :], data_pad), axis=1) else: # bottom shift data_gt = np.concatenate((data_pad, data[:, :-offset, :]), axis=1) if 'mask' in key: # TODO assertion here. 
``data_translated`` must be a subset # (or equal) of ``data_gt`` pass else: assert np.equal(data_gt, data_translated).all() def check_translate(results, results_translated, offset, img_fill_val, seg_ignore_label, direction, min_size=0): # check keys _check_keys(results, results_translated) # check image _check_img_seg(results, results_translated, results.get('img_fields', ['img']), offset, img_fill_val, direction) # check segmentation map _check_img_seg(results, results_translated, results.get('seg_fields', []), offset, seg_ignore_label, direction) # check masks and bboxes _check_bbox_mask(results, results_translated, offset, direction, min_size) # test case when level=0 (without translate aug) img_fill_val = (104, 116, 124) seg_ignore_label = 255 transform = dict( type='Translate', level=0, prob=1.0, img_fill_val=img_fill_val, seg_ignore_label=seg_ignore_label) translate_module = build_from_cfg(transform, PIPELINES) results_wo_translate = translate_module(copy.deepcopy(results)) check_translate( copy.deepcopy(results), results_wo_translate, 0, img_fill_val, seg_ignore_label, 'horizontal', ) # test case when level>0 and translate horizontally (left shift). transform = dict( type='Translate', level=8, prob=1.0, img_fill_val=img_fill_val, random_negative_prob=1.0, seg_ignore_label=seg_ignore_label) translate_module = build_from_cfg(transform, PIPELINES) offset = translate_module.offset results_translated = translate_module(copy.deepcopy(results)) check_translate( copy.deepcopy(results), results_translated, -offset, img_fill_val, seg_ignore_label, 'horizontal', ) # test case when level>0 and translate horizontally (right shift). translate_module.random_negative_prob = 0.0 results_translated = translate_module(copy.deepcopy(results)) check_translate( copy.deepcopy(results), results_translated, offset, img_fill_val, seg_ignore_label, 'horizontal', ) # test case when level>0 and translate vertically (top shift). transform = dict( type='Translate', level=10, prob=1.0, img_fill_val=img_fill_val, seg_ignore_label=seg_ignore_label, random_negative_prob=1.0, direction='vertical') translate_module = build_from_cfg(transform, PIPELINES) offset = translate_module.offset results_translated = translate_module(copy.deepcopy(results)) check_translate( copy.deepcopy(results), results_translated, -offset, img_fill_val, seg_ignore_label, 'vertical') # test case when level>0 and translate vertically (bottom shift). 
translate_module.random_negative_prob = 0.0 results_translated = translate_module(copy.deepcopy(results)) check_translate( copy.deepcopy(results), results_translated, offset, img_fill_val, seg_ignore_label, 'vertical') # test case when no translation is called (prob<=0) transform = dict( type='Translate', level=8, prob=0.0, img_fill_val=img_fill_val, random_negative_prob=0.0, seg_ignore_label=seg_ignore_label) translate_module = build_from_cfg(transform, PIPELINES) results_translated = translate_module(copy.deepcopy(results)) # test translate vertically with PolygonMasks (top shift) results = construct_toy_data(False) transform = dict( type='Translate', level=10, prob=1.0, img_fill_val=img_fill_val, seg_ignore_label=seg_ignore_label, direction='vertical') translate_module = build_from_cfg(transform, PIPELINES) offset = translate_module.offset translate_module.random_negative_prob = 1.0 results_translated = translate_module(copy.deepcopy(results)) def _translated_gt(masks, direction, offset, out_shape): translated_masks = [] for poly_per_obj in masks: translated_poly_per_obj = [] for p in poly_per_obj: p = p.copy() if direction == 'horizontal': p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1]) elif direction == 'vertical': p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0]) if PolygonMasks([[p]], *out_shape).areas[0] > 0: # filter invalid (area=0) translated_poly_per_obj.append(p) if len(translated_poly_per_obj): translated_masks.append(translated_poly_per_obj) translated_masks = PolygonMasks(translated_masks, *out_shape) return translated_masks h, w = results['img_shape'][:2] for key in results.get('mask_fields', []): masks = results[key] translated_gt = _translated_gt(masks, 'vertical', -offset, (h, w)) assert np.equal(results_translated[key].to_ndarray(), translated_gt.to_ndarray()).all() # test translate horizontally with PolygonMasks (right shift) results = construct_toy_data(False) transform = dict( type='Translate', level=8, prob=1.0, img_fill_val=img_fill_val, random_negative_prob=0.0, seg_ignore_label=seg_ignore_label) translate_module = build_from_cfg(transform, PIPELINES) offset = translate_module.offset results_translated = translate_module(copy.deepcopy(results)) h, w = results['img_shape'][:2] for key in results.get('mask_fields', []): masks = results[key] translated_gt = _translated_gt(masks, 'horizontal', offset, (h, w)) assert np.equal(results_translated[key].to_ndarray(), translated_gt.to_ndarray()).all() # test AutoAugment equipped with Translate policies = [[dict(type='Translate', level=10, prob=1.)]] autoaug = dict(type='AutoAugment', policies=policies) autoaug_module = build_from_cfg(autoaug, PIPELINES) autoaug_module(copy.deepcopy(results)) policies = [[ dict(type='Translate', level=10, prob=1.), dict( type='Translate', level=8, img_fill_val=img_fill_val, direction='vertical') ]] autoaug = dict(type='AutoAugment', policies=policies) autoaug_module = build_from_cfg(autoaug, PIPELINES) autoaug_module(copy.deepcopy(results)) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_pipelines/test_transform/utils.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
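# Shared helpers for the transform tests. A minimal usage sketch of the two
# factories defined below:
#
#     bboxes = create_random_bboxes(4, img_w=64, img_h=48)
#     # -> (4, 4) float32 xyxy boxes, top-left corners sampled in the
#     #    upper-left quadrant and bottom-right corners in the lower-right
#     masks = create_full_masks(bboxes, img_w=64, img_h=48)
#     # -> BitmapMasks with each mask fully filled inside its box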
import numpy as np from mmdet.core.mask import BitmapMasks, PolygonMasks def _check_fields(results, pipeline_results, keys): """Check data in fields from two results are same.""" for key in keys: if isinstance(results[key], (BitmapMasks, PolygonMasks)): assert np.equal(results[key].to_ndarray(), pipeline_results[key].to_ndarray()).all() else: assert np.equal(results[key], pipeline_results[key]).all() assert results[key].dtype == pipeline_results[key].dtype def check_result_same(results, pipeline_results): """Check whether the `pipeline_results` is the same with the predefined `results`. Args: results (dict): Predefined results which should be the standard output of the transform pipeline. pipeline_results (dict): Results processed by the transform pipeline. """ # check image _check_fields(results, pipeline_results, results.get('img_fields', ['img'])) # check bboxes _check_fields(results, pipeline_results, results.get('bbox_fields', [])) # check masks _check_fields(results, pipeline_results, results.get('mask_fields', [])) # check segmentations _check_fields(results, pipeline_results, results.get('seg_fields', [])) # check gt_labels if 'gt_labels' in results: assert np.equal(results['gt_labels'], pipeline_results['gt_labels']).all() def construct_toy_data(poly2mask=True): img = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.uint8) img = np.stack([img, img, img], axis=-1) results = dict() # image results['img'] = img results['img_shape'] = img.shape results['img_fields'] = ['img'] # bboxes results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore'] results['gt_bboxes'] = np.array([[0., 0., 2., 1.]], dtype=np.float32) results['gt_bboxes_ignore'] = np.array([[2., 0., 3., 1.]], dtype=np.float32) # labels results['gt_labels'] = np.array([1], dtype=np.int64) # masks results['mask_fields'] = ['gt_masks'] if poly2mask: gt_masks = np.array([[0, 1, 1, 0], [0, 1, 0, 0]], dtype=np.uint8)[None, :, :] results['gt_masks'] = BitmapMasks(gt_masks, 2, 4) else: raw_masks = [[np.array([0, 0, 2, 0, 2, 1, 0, 1], dtype=float)]] results['gt_masks'] = PolygonMasks(raw_masks, 2, 4) # segmentations results['seg_fields'] = ['gt_semantic_seg'] results['gt_semantic_seg'] = img[..., 0] return results def create_random_bboxes(num_bboxes, img_w, img_h): bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2)) bboxes_right_bottom = np.random.uniform(0.5, 1, size=(num_bboxes, 2)) bboxes = np.concatenate((bboxes_left_top, bboxes_right_bottom), 1) bboxes = (bboxes * np.array([img_w, img_h, img_w, img_h])).astype( np.float32) return bboxes def create_full_masks(gt_bboxes, img_w, img_h): xmin, ymin = gt_bboxes[:, 0:1], gt_bboxes[:, 1:2] xmax, ymax = gt_bboxes[:, 2:3], gt_bboxes[:, 3:4] gt_masks = np.zeros((len(gt_bboxes), img_h, img_w), dtype=np.uint8) for i in range(len(gt_bboxes)): gt_masks[i, int(ymin[i]):int(ymax[i]), int(xmin[i]):int(xmax[i])] = 1 gt_masks = BitmapMasks(gt_masks, img_h, img_w) return gt_masks ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_data/test_utils.py ================================================ # Copyright (c) OpenMMLab. All rights reserved.
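# replace_ImageToTensor swaps every dict(type='ImageToTensor', keys=['img'])
# step in a test pipeline for dict(type='DefaultFormatBundle'), emitting a
# UserWarning as it does so, which is why both cases below are wrapped in
# pytest.warns(UserWarning). get_loading_pipeline keeps only the leading
# data-loading steps of a training pipeline.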
import pytest from mmdet.datasets import get_loading_pipeline, replace_ImageToTensor def test_replace_ImageToTensor(): # with MultiScaleFlipAug pipelines = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize'), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] expected_pipelines = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize'), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']), ]) ] with pytest.warns(UserWarning): assert expected_pipelines == replace_ImageToTensor(pipelines) # without MultiScaleFlipAug pipelines = [ dict(type='LoadImageFromFile'), dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize'), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ] expected_pipelines = [ dict(type='LoadImageFromFile'), dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize'), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']), ] with pytest.warns(UserWarning): assert expected_pipelines == replace_ImageToTensor(pipelines) def test_get_loading_pipeline(): pipelines = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] expected_pipelines = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True) ] assert expected_pipelines == \ get_loading_pipeline(pipelines) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_downstream/test_mmtrack.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
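# These downstream tests drive mmtrack models with purely synthetic batches
# from _demo_mm_inputs (defined at the bottom of this file) instead of real
# data, e.g.:
#
#     mm_inputs = _demo_mm_inputs((1, 3, 256, 256), num_items=[10])
#     imgs = mm_inputs.pop('imgs')          # float tensor, requires_grad=True
#     img_metas = mm_inputs.pop('img_metas')
#
# num_items pins the number of boxes per batch element; passing 0 exercises
# the empty-ground-truth code path.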
import copy from collections import defaultdict import numpy as np import pytest import torch from mmcv import Config @pytest.mark.parametrize( 'cfg_file', ['./tests/data/configs_mmtrack/selsa_faster_rcnn_r101_dc5_1x.py']) def test_vid_fgfa_style_forward(cfg_file): config = Config.fromfile(cfg_file) model = copy.deepcopy(config.model) model.pretrains = None model.detector.pretrained = None from mmtrack.models import build_model detector = build_model(model) # Test forward train with a non-empty truth batch input_shape = (1, 3, 256, 256) mm_inputs = _demo_mm_inputs(input_shape, num_items=[10]) imgs = mm_inputs.pop('imgs') img_metas = mm_inputs.pop('img_metas') img_metas[0]['is_video_data'] = True gt_bboxes = mm_inputs['gt_bboxes'] gt_labels = mm_inputs['gt_labels'] gt_masks = mm_inputs['gt_masks'] ref_input_shape = (2, 3, 256, 256) ref_mm_inputs = _demo_mm_inputs(ref_input_shape, num_items=[9, 11]) ref_img = ref_mm_inputs.pop('imgs')[None] ref_img_metas = ref_mm_inputs.pop('img_metas') ref_img_metas[0]['is_video_data'] = True ref_img_metas[1]['is_video_data'] = True ref_gt_bboxes = ref_mm_inputs['gt_bboxes'] ref_gt_labels = ref_mm_inputs['gt_labels'] ref_gt_masks = ref_mm_inputs['gt_masks'] losses = detector.forward( img=imgs, img_metas=img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, ref_img=ref_img, ref_img_metas=[ref_img_metas], ref_gt_bboxes=ref_gt_bboxes, ref_gt_labels=ref_gt_labels, gt_masks=gt_masks, ref_gt_masks=ref_gt_masks, return_loss=True) assert isinstance(losses, dict) loss, _ = detector._parse_losses(losses) loss.requires_grad_(True) assert float(loss.item()) > 0 loss.backward() # Test forward train with an empty truth batch mm_inputs = _demo_mm_inputs(input_shape, num_items=[0]) imgs = mm_inputs.pop('imgs') img_metas = mm_inputs.pop('img_metas') img_metas[0]['is_video_data'] = True gt_bboxes = mm_inputs['gt_bboxes'] gt_labels = mm_inputs['gt_labels'] gt_masks = mm_inputs['gt_masks'] ref_mm_inputs = _demo_mm_inputs(ref_input_shape, num_items=[0, 0]) ref_imgs = ref_mm_inputs.pop('imgs')[None] ref_img_metas = ref_mm_inputs.pop('img_metas') ref_img_metas[0]['is_video_data'] = True ref_img_metas[1]['is_video_data'] = True ref_gt_bboxes = ref_mm_inputs['gt_bboxes'] ref_gt_labels = ref_mm_inputs['gt_labels'] ref_gt_masks = ref_mm_inputs['gt_masks'] losses = detector.forward( img=imgs, img_metas=img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, ref_img=ref_imgs, ref_img_metas=[ref_img_metas], ref_gt_bboxes=ref_gt_bboxes, ref_gt_labels=ref_gt_labels, gt_masks=gt_masks, ref_gt_masks=ref_gt_masks, return_loss=True) assert isinstance(losses, dict) loss, _ = detector._parse_losses(losses) loss.requires_grad_(True) assert float(loss.item()) > 0 loss.backward() # Test forward test with frame_stride=1 and frame_range=[-1,0] with torch.no_grad(): imgs = torch.cat([imgs, imgs.clone()], dim=0) img_list = [g[None, :] for g in imgs] img_metas.extend(copy.deepcopy(img_metas)) for i in range(len(img_metas)): img_metas[i]['frame_id'] = i img_metas[i]['num_left_ref_imgs'] = 1 img_metas[i]['frame_stride'] = 1 ref_imgs = [ref_imgs.clone(), imgs[[0]][None].clone()] ref_img_metas = [ copy.deepcopy(ref_img_metas), copy.deepcopy([img_metas[0]]) ] results = defaultdict(list) for one_img, one_meta, ref_img, ref_img_meta in zip( img_list, img_metas, ref_imgs, ref_img_metas): result = detector.forward([one_img], [[one_meta]], ref_img=[ref_img], ref_img_metas=[[ref_img_meta]], return_loss=False) for k, v in result.items(): results[k].append(v) @pytest.mark.parametrize('cfg_file', [ 
'./tests/data/configs_mmtrack/tracktor_faster-rcnn_r50_fpn_4e.py', ]) def test_tracktor_forward(cfg_file): config = Config.fromfile(cfg_file) model = copy.deepcopy(config.model) model.pretrains = None model.detector.pretrained = None from mmtrack.models import build_model mot = build_model(model) mot.eval() input_shape = (1, 3, 256, 256) mm_inputs = _demo_mm_inputs(input_shape, num_items=[10], with_track=True) imgs = mm_inputs.pop('imgs') img_metas = mm_inputs.pop('img_metas') with torch.no_grad(): imgs = torch.cat([imgs, imgs.clone()], dim=0) img_list = [g[None, :] for g in imgs] img2_metas = copy.deepcopy(img_metas) img2_metas[0]['frame_id'] = 1 img_metas.extend(img2_metas) results = defaultdict(list) for one_img, one_meta in zip(img_list, img_metas): result = mot.forward([one_img], [[one_meta]], return_loss=False) for k, v in result.items(): results[k].append(v) def _demo_mm_inputs( input_shape=(1, 3, 300, 300), num_items=None, num_classes=10, with_track=False): """Create a superset of inputs needed to run test or train batches. Args: input_shape (tuple): input batch dimensions num_items (None | List[int]): specifies the number of boxes in each batch item num_classes (int): number of different labels a box might have """ from mmdet.core import BitmapMasks (N, C, H, W) = input_shape rng = np.random.RandomState(0) imgs = rng.rand(*input_shape) img_metas = [{ 'img_shape': (H, W, C), 'ori_shape': (H, W, C), 'pad_shape': (H, W, C), 'filename': '.png', 'scale_factor': 1.0, 'flip': False, 'frame_id': 0, 'img_norm_cfg': { 'mean': (128.0, 128.0, 128.0), 'std': (10.0, 10.0, 10.0) } } for i in range(N)] gt_bboxes = [] gt_labels = [] gt_masks = [] gt_match_indices = [] for batch_idx in range(N): if num_items is None: num_boxes = rng.randint(1, 10) else: num_boxes = num_items[batch_idx] cx, cy, bw, bh = rng.rand(num_boxes, 4).T tl_x = ((cx * W) - (W * bw / 2)).clip(0, W) tl_y = ((cy * H) - (H * bh / 2)).clip(0, H) br_x = ((cx * W) + (W * bw / 2)).clip(0, W) br_y = ((cy * H) + (H * bh / 2)).clip(0, H) boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T class_idxs = rng.randint(1, num_classes, size=num_boxes) gt_bboxes.append(torch.FloatTensor(boxes)) gt_labels.append(torch.LongTensor(class_idxs)) if with_track: gt_match_indices.append(torch.arange(boxes.shape[0])) mask = np.random.randint(0, 2, (len(boxes), H, W), dtype=np.uint8) gt_masks.append(BitmapMasks(mask, H, W)) mm_inputs = { 'imgs': torch.FloatTensor(imgs).requires_grad_(True), 'img_metas': img_metas, 'gt_bboxes': gt_bboxes, 'gt_labels': gt_labels, 'gt_bboxes_ignore': None, 'gt_masks': gt_masks, } if with_track: mm_inputs['gt_match_indices'] = gt_match_indices return mm_inputs ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_metrics/test_box_overlap.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
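# A worked example behind the hard-coded GIoU expectations further down.
# For boxes [10, 10, 20, 20] and [0, 10, 10, 19]:
#
#     intersection = 0 (the boxes only touch at x = 10)
#     union        = 10 * 10 + 10 * 9 = 190
#     enclosure    = (20 - 0) * (20 - 10) = 200
#     giou         = iou - (enclosure - union) / enclosure
#                  = 0 - 10 / 200 = -0.05
#
# which matches the -0.0500 entry in expected_gious.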

================================================
FILE: DLTA_AI_app/mmdetection/tests/test_metrics/test_box_overlap.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch

from mmdet.core import BboxOverlaps2D, bbox_overlaps
from mmdet.core.evaluation.bbox_overlaps import \
    bbox_overlaps as recall_overlaps


def test_bbox_overlaps_2d(eps=1e-7):

    def _construct_bbox(num_bbox=None):
        img_h = int(np.random.randint(3, 1000))
        img_w = int(np.random.randint(3, 1000))
        if num_bbox is None:
            num_bbox = np.random.randint(1, 10)
        x1y1 = torch.rand((num_bbox, 2))
        x2y2 = torch.max(torch.rand((num_bbox, 2)), x1y1)
        bboxes = torch.cat((x1y1, x2y2), -1)
        bboxes[:, 0::2] *= img_w
        bboxes[:, 1::2] *= img_h
        return bboxes, num_bbox

    # is_aligned is True, bboxes.size(-1) == 5 (include score)
    self = BboxOverlaps2D()
    bboxes1, num_bbox = _construct_bbox()
    bboxes2, _ = _construct_bbox(num_bbox)
    bboxes1 = torch.cat((bboxes1, torch.rand((num_bbox, 1))), 1)
    bboxes2 = torch.cat((bboxes2, torch.rand((num_bbox, 1))), 1)
    gious = self(bboxes1, bboxes2, 'giou', True)
    assert gious.size() == (num_bbox, ), gious.size()
    assert torch.all(gious >= -1) and torch.all(gious <= 1)

    # is_aligned is True, bboxes1.size(-2) == 0
    bboxes1 = torch.empty((0, 4))
    bboxes2 = torch.empty((0, 4))
    gious = self(bboxes1, bboxes2, 'giou', True)
    assert gious.size() == (0, ), gious.size()
    assert torch.all(gious == torch.empty((0, )))
    assert torch.all(gious >= -1) and torch.all(gious <= 1)

    # is_aligned is True, and bboxes.ndims > 2
    bboxes1, num_bbox = _construct_bbox()
    bboxes2, _ = _construct_bbox(num_bbox)
    bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)
    # test assertion when batch dim is not the same
    with pytest.raises(AssertionError):
        self(bboxes1, bboxes2.unsqueeze(0).repeat(3, 1, 1), 'giou', True)
    bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)
    gious = self(bboxes1, bboxes2, 'giou', True)
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    assert gious.size() == (2, num_bbox)
    bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1, 1)
    bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1, 1)
    gious = self(bboxes1, bboxes2, 'giou', True)
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    assert gious.size() == (2, 2, num_bbox)

    # is_aligned is False
    bboxes1, num_bbox1 = _construct_bbox()
    bboxes2, num_bbox2 = _construct_bbox()
    gious = self(bboxes1, bboxes2, 'giou')
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    assert gious.size() == (num_bbox1, num_bbox2)

    # is_aligned is False, and bboxes.ndims > 2
    bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)
    bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)
    gious = self(bboxes1, bboxes2, 'giou')
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    assert gious.size() == (2, num_bbox1, num_bbox2)
    bboxes1 = bboxes1.unsqueeze(0)
    bboxes2 = bboxes2.unsqueeze(0)
    gious = self(bboxes1, bboxes2, 'giou')
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    assert gious.size() == (1, 2, num_bbox1, num_bbox2)

    # is_aligned is False, bboxes1.size(-2) == 0
    gious = self(torch.empty(1, 2, 0, 4), bboxes2, 'giou')
    assert torch.all(gious == torch.empty(1, 2, 0, bboxes2.size(-2)))
    assert torch.all(gious >= -1) and torch.all(gious <= 1)

    # test allclose between bbox_overlaps and the original official
    # implementation.
    bboxes1 = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [32, 32, 38, 42],
    ])
    bboxes2 = torch.FloatTensor([
        [0, 0, 10, 20],
        [0, 10, 10, 19],
        [10, 10, 20, 20],
    ])
    gious = bbox_overlaps(bboxes1, bboxes2, 'giou', is_aligned=True, eps=eps)
    gious = gious.numpy().round(4)
    # the ground truth is computed with four-decimal precision.
    expected_gious = np.array([0.5000, -0.0500, -0.8214])
    assert np.allclose(gious, expected_gious, rtol=0, atol=eps)

    # test mode 'iof'
    ious = bbox_overlaps(bboxes1, bboxes2, 'iof', is_aligned=True, eps=eps)
    assert torch.all(ious >= -1) and torch.all(ious <= 1)
    assert ious.size() == (bboxes1.size(0), )
    ious = bbox_overlaps(bboxes1, bboxes2, 'iof', eps=eps)
    assert torch.all(ious >= -1) and torch.all(ious <= 1)
    assert ious.size() == (bboxes1.size(0), bboxes2.size(0))


def test_voc_recall_overlaps():

    def _construct_bbox(num_bbox=None):
        img_h = int(np.random.randint(3, 1000))
        img_w = int(np.random.randint(3, 1000))
        if num_bbox is None:
            num_bbox = np.random.randint(1, 10)
        x1y1 = torch.rand((num_bbox, 2))
        x2y2 = torch.max(torch.rand((num_bbox, 2)), x1y1)
        bboxes = torch.cat((x1y1, x2y2), -1)
        bboxes[:, 0::2] *= img_w
        bboxes[:, 1::2] *= img_h
        return bboxes.numpy(), num_bbox

    bboxes1, num_bbox = _construct_bbox()
    bboxes2, _ = _construct_bbox(num_bbox)
    ious = recall_overlaps(
        bboxes1, bboxes2, 'iou', use_legacy_coordinate=False)
    assert ious.shape == (num_bbox, num_bbox)
    assert np.all(ious >= -1) and np.all(ious <= 1)

    ious = recall_overlaps(
        bboxes1, bboxes2, 'iou', use_legacy_coordinate=True)
    assert ious.shape == (num_bbox, num_bbox)
    assert np.all(ious >= -1) and np.all(ious <= 1)
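
# Editor's note (not part of the repository): a minimal sketch of the
# bbox_overlaps API exercised above, assuming an mmdet installation; the
# helper name `_example_bbox_overlaps_usage` is hypothetical. Two 10x10
# boxes overlapping over half their width have IoU 50 / 150 = 1/3.
def _example_bbox_overlaps_usage():
    import torch
    from mmdet.core import bbox_overlaps
    b1 = torch.FloatTensor([[0., 0., 10., 10.]])
    b2 = torch.FloatTensor([[5., 0., 15., 10.]])
    iou = bbox_overlaps(b1, b2, 'iou', is_aligned=True)
    assert torch.allclose(iou, torch.tensor([1. / 3.]), atol=1e-4)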

================================================
FILE: DLTA_AI_app/mmdetection/tests/test_metrics/test_losses.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch

from mmdet.models import Accuracy, build_loss


def test_ce_loss():
    # use_mask and use_sigmoid cannot be true at the same time
    with pytest.raises(AssertionError):
        loss_cfg = dict(
            type='CrossEntropyLoss',
            use_mask=True,
            use_sigmoid=True,
            loss_weight=1.0)
        build_loss(loss_cfg)

    # test loss with class weights
    loss_cls_cfg = dict(
        type='CrossEntropyLoss',
        use_sigmoid=False,
        class_weight=[0.8, 0.2],
        loss_weight=1.0)
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[100, -100]])
    fake_label = torch.Tensor([1]).long()
    assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(40.))

    loss_cls_cfg = dict(
        type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)
    loss_cls = build_loss(loss_cls_cfg)
    assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(200.))


def test_varifocal_loss():
    # only sigmoid version of VarifocalLoss is implemented
    with pytest.raises(AssertionError):
        loss_cfg = dict(
            type='VarifocalLoss', use_sigmoid=False, loss_weight=1.0)
        build_loss(loss_cfg)

    # test that alpha should be greater than 0
    with pytest.raises(AssertionError):
        loss_cfg = dict(
            type='VarifocalLoss',
            alpha=-0.75,
            gamma=2.0,
            use_sigmoid=True,
            loss_weight=1.0)
        build_loss(loss_cfg)

    # test that pred and target should be of the same size
    loss_cls_cfg = dict(
        type='VarifocalLoss',
        use_sigmoid=True,
        alpha=0.75,
        gamma=2.0,
        iou_weighted=True,
        reduction='mean',
        loss_weight=1.0)
    loss_cls = build_loss(loss_cls_cfg)
    with pytest.raises(AssertionError):
        fake_pred = torch.Tensor([[100.0, -100.0]])
        fake_target = torch.Tensor([[1.0]])
        loss_cls(fake_pred, fake_target)

    # test the calculation
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[100.0, -100.0]])
    fake_target = torch.Tensor([[1.0, 0.0]])
    assert torch.allclose(loss_cls(fake_pred, fake_target), torch.tensor(0.0))

    # test the loss with weights
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[0.0, 100.0]])
    fake_target = torch.Tensor([[1.0, 1.0]])
    fake_weight = torch.Tensor([0.0, 1.0])
    assert torch.allclose(
        loss_cls(fake_pred, fake_target, fake_weight), torch.tensor(0.0))


def test_kd_loss():
    # test that temperature should be greater than 1
    with pytest.raises(AssertionError):
        loss_cfg = dict(
            type='KnowledgeDistillationKLDivLoss', loss_weight=1.0, T=0.5)
        build_loss(loss_cfg)

    # test that pred and target should be of the same size
    loss_cls_cfg = dict(
        type='KnowledgeDistillationKLDivLoss', loss_weight=1.0, T=1)
    loss_cls = build_loss(loss_cls_cfg)
    with pytest.raises(AssertionError):
        fake_pred = torch.Tensor([[100, -100]])
        fake_label = torch.Tensor([1]).long()
        loss_cls(fake_pred, fake_label)

    # test the calculation
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[100.0, 100.0]])
    fake_target = torch.Tensor([[1.0, 1.0]])
    assert torch.allclose(loss_cls(fake_pred, fake_target), torch.tensor(0.0))

    # test the loss with weights
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[100.0, -100.0], [100.0, 100.0]])
    fake_target = torch.Tensor([[1.0, 0.0], [1.0, 1.0]])
    fake_weight = torch.Tensor([0.0, 1.0])
    assert torch.allclose(
        loss_cls(fake_pred, fake_target, fake_weight), torch.tensor(0.0))


def test_seesaw_loss():
    # only softmax version of Seesaw Loss is implemented
    with pytest.raises(AssertionError):
        loss_cfg = dict(type='SeesawLoss', use_sigmoid=True, loss_weight=1.0)
        build_loss(loss_cfg)

    # test that cls_score.size(-1) == num_classes + 2
    loss_cls_cfg = dict(
        type='SeesawLoss', p=0.0, q=0.0, loss_weight=1.0, num_classes=2)
    loss_cls = build_loss(loss_cls_cfg)
    # the length of fake_pred should be num_classes + 2 = 4
    with pytest.raises(AssertionError):
        fake_pred = torch.Tensor([[-100, 100]])
        fake_label = torch.Tensor([1]).long()
        loss_cls(fake_pred, fake_label)
    # the length of fake_pred should be num_classes + 2 = 4
    with pytest.raises(AssertionError):
        fake_pred = torch.Tensor([[-100, 100, -100]])
        fake_label = torch.Tensor([1]).long()
        loss_cls(fake_pred, fake_label)

    # test the calculation without p and q
    loss_cls_cfg = dict(
        type='SeesawLoss', p=0.0, q=0.0, loss_weight=1.0, num_classes=2)
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[-100, 100, -100, 100]])
    fake_label = torch.Tensor([1]).long()
    loss = loss_cls(fake_pred, fake_label)
    assert torch.allclose(loss['loss_cls_objectness'], torch.tensor(200.))
    assert torch.allclose(loss['loss_cls_classes'], torch.tensor(0.))

    # test the calculation with p and without q
    loss_cls_cfg = dict(
        type='SeesawLoss', p=1.0, q=0.0, loss_weight=1.0, num_classes=2)
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[-100, 100, -100, 100]])
    fake_label = torch.Tensor([0]).long()
    loss_cls.cum_samples[0] = torch.exp(torch.Tensor([20]))
    loss = loss_cls(fake_pred, fake_label)
    assert torch.allclose(loss['loss_cls_objectness'], torch.tensor(200.))
    assert torch.allclose(loss['loss_cls_classes'], torch.tensor(180.))

    # test the calculation with q and without p
    loss_cls_cfg = dict(
        type='SeesawLoss', p=0.0, q=1.0, loss_weight=1.0, num_classes=2)
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[-100, 100, -100, 100]])
    fake_label = torch.Tensor([0]).long()
    loss = loss_cls(fake_pred, fake_label)
    assert torch.allclose(loss['loss_cls_objectness'], torch.tensor(200.))
    assert torch.allclose(loss['loss_cls_classes'],
                          torch.tensor(200.) + torch.tensor(100.).log())

    # test the others
    loss_cls_cfg = dict(
        type='SeesawLoss',
        p=0.0,
        q=1.0,
        loss_weight=1.0,
        num_classes=2,
        return_dict=False)
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[100, -100, 100, -100]])
    fake_label = torch.Tensor([0]).long()
    loss = loss_cls(fake_pred, fake_label)
    acc = loss_cls.get_accuracy(fake_pred, fake_label)
    act = loss_cls.get_activation(fake_pred)
    assert torch.allclose(loss, torch.tensor(0.))
    assert torch.allclose(acc['acc_objectness'], torch.tensor(100.))
    assert torch.allclose(acc['acc_classes'], torch.tensor(100.))
    assert torch.allclose(act, torch.tensor([1., 0., 0.]))


def test_accuracy():
    # test for empty pred
    pred = torch.empty(0, 4)
    label = torch.empty(0)
    accuracy = Accuracy(topk=1)
    acc = accuracy(pred, label)
    assert acc.item() == 0

    pred = torch.Tensor([[0.2, 0.3, 0.6, 0.5], [0.1, 0.1, 0.2, 0.6],
                         [0.9, 0.0, 0.0, 0.1], [0.4, 0.7, 0.1, 0.1],
                         [0.0, 0.0, 0.99, 0]])
    # test for top1
    true_label = torch.Tensor([2, 3, 0, 1, 2]).long()
    accuracy = Accuracy(topk=1)
    acc = accuracy(pred, true_label)
    assert acc.item() == 100

    # test for top1 with score thresh=0.8
    true_label = torch.Tensor([2, 3, 0, 1, 2]).long()
    accuracy = Accuracy(topk=1, thresh=0.8)
    acc = accuracy(pred, true_label)
    assert acc.item() == 40

    # test for top2
    accuracy = Accuracy(topk=2)
    label = torch.Tensor([3, 2, 0, 0, 2]).long()
    acc = accuracy(pred, label)
    assert acc.item() == 100

    # test for both top1 and top2
    accuracy = Accuracy(topk=(1, 2))
    true_label = torch.Tensor([2, 3, 0, 1, 2]).long()
    acc = accuracy(pred, true_label)
    for a in acc:
        assert a.item() == 100

    # topk is larger than pred class number
    with pytest.raises(AssertionError):
        accuracy = Accuracy(topk=5)
        accuracy(pred, true_label)

    # wrong topk type
    with pytest.raises(AssertionError):
        accuracy = Accuracy(topk='wrong type')
        accuracy(pred, true_label)

    # label size is larger than required
    with pytest.raises(AssertionError):
        label = torch.Tensor([2, 3, 0, 1, 2, 0]).long()  # size mismatch
        accuracy = Accuracy()
        accuracy(pred, label)

    # wrong pred dimension
    with pytest.raises(AssertionError):
        accuracy = Accuracy()
        accuracy(pred[:, :, None], true_label)
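
# Editor's note (not part of the repository): a minimal sketch of the
# build_loss config pattern used throughout this file, assuming mmdet is
# installed; the helper name is hypothetical. A confident correct logit
# drives the cross-entropy loss towards zero.
def _example_build_loss_usage():
    import torch
    from mmdet.models import build_loss
    loss_cls = build_loss(
        dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
    fake_pred = torch.Tensor([[10., -10.]])
    fake_label = torch.Tensor([0]).long()
    assert loss_cls(fake_pred, fake_label) < 1e-4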

================================================
FILE: DLTA_AI_app/mmdetection/tests/test_metrics/test_mean_ap.py
================================================
import numpy as np

from mmdet.core.evaluation.mean_ap import (eval_map, tpfp_default,
                                           tpfp_imagenet, tpfp_openimages)

det_bboxes = np.array([
    [0, 0, 10, 10],
    [10, 10, 20, 20],
    [32, 32, 38, 42],
])
gt_bboxes = np.array([[0, 0, 10, 20], [0, 10, 10, 19], [10, 10, 20, 20]])
gt_ignore = np.array([[5, 5, 10, 20], [6, 10, 10, 19]])


def test_tpfp_imagenet():
    result = tpfp_imagenet(
        det_bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_ignore,
        use_legacy_coordinate=True)
    tp = result[0]
    fp = result[1]
    assert tp.shape == (1, 3)
    assert fp.shape == (1, 3)
    assert (tp == np.array([[1, 1, 0]])).all()
    assert (fp == np.array([[0, 0, 1]])).all()

    result = tpfp_imagenet(
        det_bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_ignore,
        use_legacy_coordinate=False)
    tp = result[0]
    fp = result[1]
    assert tp.shape == (1, 3)
    assert fp.shape == (1, 3)
    assert (tp == np.array([[1, 1, 0]])).all()
    assert (fp == np.array([[0, 0, 1]])).all()


def test_tpfp_default():
    result = tpfp_default(
        det_bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_ignore,
        use_legacy_coordinate=True)
    tp = result[0]
    fp = result[1]
    assert tp.shape == (1, 3)
    assert fp.shape == (1, 3)
    assert (tp == np.array([[1, 1, 0]])).all()
    assert (fp == np.array([[0, 0, 1]])).all()

    result = tpfp_default(
        det_bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_ignore,
        use_legacy_coordinate=False)
    tp = result[0]
    fp = result[1]
    assert tp.shape == (1, 3)
    assert fp.shape == (1, 3)
    assert (tp == np.array([[1, 1, 0]])).all()
    assert (fp == np.array([[0, 0, 1]])).all()


def test_eval_map():
    # 2 image and 2 classes
    det_results = [[det_bboxes, det_bboxes], [det_bboxes, det_bboxes]]
    labels = np.array([0, 1, 1])
    labels_ignore = np.array([0, 1])
    gt_info = {
        'bboxes': gt_bboxes,
        'bboxes_ignore': gt_ignore,
        'labels': labels,
        'labels_ignore': labels_ignore
    }
    annotations = [gt_info, gt_info]
    mean_ap, eval_results = eval_map(
        det_results, annotations, use_legacy_coordinate=True)
    assert 0.291 < mean_ap < 0.293
    mean_ap, eval_results = eval_map(
        det_results, annotations, use_legacy_coordinate=False)
    assert 0.291 < mean_ap < 0.293

    # 1 image and 2 classes
    det_results = [[det_bboxes, det_bboxes]]
    labels = np.array([0, 1, 1])
    labels_ignore = np.array([0, 1])
    gt_info = {
        'bboxes': gt_bboxes,
        'bboxes_ignore': gt_ignore,
        'labels': labels,
        'labels_ignore': labels_ignore
    }
    annotations = [gt_info]
    mean_ap, eval_results = eval_map(
        det_results, annotations, use_legacy_coordinate=True)
    assert 0.291 < mean_ap < 0.293
    mean_ap, eval_results = eval_map(
        det_results, annotations, use_legacy_coordinate=False)
    assert 0.291 < mean_ap < 0.293


def test_tpfp_openimages():
    det_bboxes = np.array([[10, 10, 15, 15, 1.0], [15, 15, 30, 30, 0.98],
                           [10, 10, 25, 25, 0.98], [28, 28, 35, 35, 0.97],
                           [30, 30, 51, 51, 0.96], [100, 110, 120, 130, 0.15]])
    gt_bboxes = np.array([[10., 10., 30., 30.], [30., 30., 50., 50.]])
    # plain `bool` instead of the `np.bool` alias removed in newer NumPy
    gt_groups_of = np.array([True, False], dtype=bool)
    gt_ignore = np.zeros((0, 4))

    # Open Images evaluation using group of.
    result = tpfp_openimages(
        det_bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_ignore,
        gt_bboxes_group_of=gt_groups_of,
        use_group_of=True,
        ioa_thr=0.5)
    tp = result[0]
    fp = result[1]
    cls_dets = result[2]
    assert tp.shape == (1, 4)
    assert fp.shape == (1, 4)
    assert cls_dets.shape == (4, 5)
    assert (tp == np.array([[0, 1, 0, 1]])).all()
    assert (fp == np.array([[1, 0, 1, 0]])).all()
    cls_dets_gt = np.array([[28., 28., 35., 35., 0.97],
                            [30., 30., 51., 51., 0.96],
                            [100., 110., 120., 130., 0.15],
                            [10., 10., 15., 15., 1.]])
    assert (cls_dets == cls_dets_gt).all()

    # Open Images evaluation not using group of.
    result = tpfp_openimages(
        det_bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_ignore,
        gt_bboxes_group_of=gt_groups_of,
        use_group_of=False,
        ioa_thr=0.5)
    tp = result[0]
    fp = result[1]
    cls_dets = result[2]
    assert tp.shape == (1, 6)
    assert fp.shape == (1, 6)
    assert cls_dets.shape == (6, 5)

    # Open Images evaluation using group of, and gt is all group of bboxes.
    gt_groups_of = np.array([True, True], dtype=bool)
    result = tpfp_openimages(
        det_bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_ignore,
        gt_bboxes_group_of=gt_groups_of,
        use_group_of=True,
        ioa_thr=0.5)
    tp = result[0]
    fp = result[1]
    cls_dets = result[2]
    assert tp.shape == (1, 3)
    assert fp.shape == (1, 3)
    assert cls_dets.shape == (3, 5)

    # Open Images evaluation with empty gt.
    gt_bboxes = np.zeros((0, 4))
    gt_groups_of = np.empty((0))
    result = tpfp_openimages(
        det_bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_ignore,
        gt_bboxes_group_of=gt_groups_of,
        use_group_of=True,
        ioa_thr=0.5)
    fp = result[1]
    assert (fp == np.array([[1, 1, 1, 1, 1, 1]])).all()


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_metrics/test_recall.py
================================================
import numpy as np

from mmdet.core.evaluation.recall import eval_recalls

det_bboxes = np.array([
    [0, 0, 10, 10],
    [10, 10, 20, 20],
    [32, 32, 38, 42],
])
gt_bboxes = np.array([[0, 0, 10, 20], [0, 10, 10, 19], [10, 10, 20, 20]])
gt_ignore = np.array([[5, 5, 10, 20], [6, 10, 10, 19]])


def test_eval_recalls():
    gts = [gt_bboxes, gt_bboxes, gt_bboxes]
    proposals = [det_bboxes, det_bboxes, det_bboxes]

    recall = eval_recalls(
        gts, proposals, proposal_nums=2, use_legacy_coordinate=True)
    assert recall.shape == (1, 1)
    assert 0.66 < recall[0][0] < 0.667
    recall = eval_recalls(
        gts, proposals, proposal_nums=2, use_legacy_coordinate=False)
    assert recall.shape == (1, 1)
    assert 0.66 < recall[0][0] < 0.667
    recall = eval_recalls(
        gts, proposals, proposal_nums=2, use_legacy_coordinate=True)
    assert recall.shape == (1, 1)
    assert 0.66 < recall[0][0] < 0.667
    recall = eval_recalls(
        gts,
        proposals,
        iou_thrs=[0.1, 0.9],
        proposal_nums=2,
        use_legacy_coordinate=False)
    assert recall.shape == (1, 2)
    assert recall[0][1] <= recall[0][0]
    recall = eval_recalls(
        gts,
        proposals,
        iou_thrs=[0.1, 0.9],
        proposal_nums=2,
        use_legacy_coordinate=True)
    assert recall.shape == (1, 2)
    assert recall[0][1] <= recall[0][0]
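
# Editor's note (not part of the repository): a minimal sketch of
# eval_recalls semantics, assuming mmdet is installed; the helper name is
# hypothetical. A proposal set that reproduces the ground truth exactly
# gives recall 1.0 at any reasonable IoU threshold.
def _example_eval_recalls_usage():
    import numpy as np
    from mmdet.core.evaluation.recall import eval_recalls
    gts = [np.array([[0., 0., 10., 10.]])]
    proposals = [np.array([[0., 0., 10., 10.]])]
    recall = eval_recalls(gts, proposals, proposal_nums=1, iou_thrs=[0.5])
    # one row per proposal budget, one column per IoU threshold
    assert recall.shape == (1, 1)
    assert recall[0][0] == 1.0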

================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_backbones/__init__.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import check_norm_state, is_block, is_norm

__all__ = ['is_block', 'is_norm', 'check_norm_state']


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_backbones/test_csp_darknet.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm

from mmdet.models.backbones.csp_darknet import CSPDarknet
from .utils import check_norm_state, is_norm


def test_csp_darknet_backbone():
    with pytest.raises(ValueError):
        # frozen_stages must be in range(-1, len(arch_setting) + 1)
        CSPDarknet(frozen_stages=6)

    with pytest.raises(AssertionError):
        # out_indices must be in range(len(arch_setting) + 1)
        CSPDarknet(out_indices=[6])

    # Test CSPDarknet with first stage frozen
    frozen_stages = 1
    model = CSPDarknet(frozen_stages=frozen_stages)
    model.train()
    for mod in model.stem.modules():
        for param in mod.parameters():
            assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'stage{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    # Test CSPDarknet with norm_eval=True
    model = CSPDarknet(norm_eval=True)
    model.train()
    assert check_norm_state(model.modules(), False)

    # Test CSPDarknet-P5 forward with widen_factor=0.25
    model = CSPDarknet(arch='P5', widen_factor=0.25, out_indices=range(0, 5))
    model.train()
    imgs = torch.randn(1, 3, 64, 64)
    feat = model(imgs)
    assert len(feat) == 5
    assert feat[0].shape == torch.Size((1, 16, 32, 32))
    assert feat[1].shape == torch.Size((1, 32, 16, 16))
    assert feat[2].shape == torch.Size((1, 64, 8, 8))
    assert feat[3].shape == torch.Size((1, 128, 4, 4))
    assert feat[4].shape == torch.Size((1, 256, 2, 2))

    # Test CSPDarknet-P6 forward with widen_factor=0.25
    model = CSPDarknet(
        arch='P6',
        widen_factor=0.25,
        out_indices=range(0, 6),
        spp_kernal_sizes=(3, 5, 7))
    model.train()
    imgs = torch.randn(1, 3, 128, 128)
    feat = model(imgs)
    assert feat[0].shape == torch.Size((1, 16, 64, 64))
    assert feat[1].shape == torch.Size((1, 32, 32, 32))
    assert feat[2].shape == torch.Size((1, 64, 16, 16))
    assert feat[3].shape == torch.Size((1, 128, 8, 8))
    assert feat[4].shape == torch.Size((1, 192, 4, 4))
    assert feat[5].shape == torch.Size((1, 256, 2, 2))

    # Test CSPDarknet forward with dict(type='ReLU')
    model = CSPDarknet(
        widen_factor=0.125,
        act_cfg=dict(type='ReLU'),
        out_indices=range(0, 5))
    model.train()
    imgs = torch.randn(1, 3, 64, 64)
    feat = model(imgs)
    assert len(feat) == 5
    assert feat[0].shape == torch.Size((1, 8, 32, 32))
    assert feat[1].shape == torch.Size((1, 16, 16, 16))
    assert feat[2].shape == torch.Size((1, 32, 8, 8))
    assert feat[3].shape == torch.Size((1, 64, 4, 4))
    assert feat[4].shape == torch.Size((1, 128, 2, 2))

    # Test CSPDarknet with BatchNorm forward
    model = CSPDarknet(widen_factor=0.125, out_indices=range(0, 5))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, _BatchNorm)
    model.train()
    imgs = torch.randn(1, 3, 64, 64)
    feat = model(imgs)
    assert len(feat) == 5
    assert feat[0].shape == torch.Size((1, 8, 32, 32))
    assert feat[1].shape == torch.Size((1, 16, 16, 16))
    assert feat[2].shape == torch.Size((1, 32, 8, 8))
    assert feat[3].shape == torch.Size((1, 64, 4, 4))
    assert feat[4].shape == torch.Size((1, 128, 2, 2))

    # Test CSPDarknet with custom arch forward
    arch_ovewrite = [[32, 56, 3, True, False], [56, 224, 2, True, False],
                     [224, 512, 1, True, False]]
    model = CSPDarknet(
        arch_ovewrite=arch_ovewrite,
        widen_factor=0.25,
        out_indices=(0, 1, 2, 3))
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size((1, 8, 16, 16))
    assert feat[1].shape == torch.Size((1, 14, 8, 8))
    assert feat[2].shape == torch.Size((1, 56, 4, 4))
    assert feat[3].shape == torch.Size((1, 128, 2, 2))


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_backbones/test_detectors_resnet.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import pytest

from mmdet.models.backbones import DetectoRS_ResNet


def test_detectorrs_resnet_backbone():
    detectorrs_cfg = dict(
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        conv_cfg=dict(type='ConvAWS'),
        sac=dict(type='SAC', use_deform=True),
        stage_with_sac=(False, True, True, True),
        output_img=True)
    """Test init_weights config"""
    with pytest.raises(AssertionError):
        # pretrained and init_cfg cannot be specified at the same time
        DetectoRS_ResNet(
            **detectorrs_cfg, pretrained='Pretrained', init_cfg='Pretrained')

    with pytest.raises(AssertionError):
        # init_cfg must be a dict
        DetectoRS_ResNet(
            **detectorrs_cfg, pretrained=None, init_cfg=['Pretrained'])

    with pytest.raises(KeyError):
        # init_cfg must contain the key `type`
        DetectoRS_ResNet(
            **detectorrs_cfg,
            pretrained=None,
            init_cfg=dict(checkpoint='Pretrained'))

    with pytest.raises(AssertionError):
        # init_cfg only supports initializing from pretrained weights
        DetectoRS_ResNet(
            **detectorrs_cfg, pretrained=None, init_cfg=dict(type='Trained'))

    with pytest.raises(TypeError):
        # pretrained must be a str or None
        model = DetectoRS_ResNet(
            **detectorrs_cfg, pretrained=['Pretrained'], init_cfg=None)
        model.init_weights()


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_backbones/test_efficientnet.py
================================================
import pytest
import torch

from mmdet.models.backbones import EfficientNet


def test_efficientnet_backbone():
    """Test EfficientNet backbone."""
    with pytest.raises(AssertionError):
        # EfficientNet arch should be a key in EfficientNet.arch_settings
        EfficientNet(arch='c3')

    model = EfficientNet(arch='b0', out_indices=(0, 1, 2, 3, 4, 5, 6))
    model.train()
    imgs = torch.randn(2, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size([2, 32, 16, 16])
    assert feat[1].shape == torch.Size([2, 16, 16, 16])
    assert feat[2].shape == torch.Size([2, 24, 8, 8])
    assert feat[3].shape == torch.Size([2, 40, 4, 4])
    assert feat[4].shape == torch.Size([2, 112, 2, 2])
    assert feat[5].shape == torch.Size([2, 320, 1, 1])
    assert feat[6].shape == torch.Size([2, 1280, 1, 1])
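
# Editor's note (not part of the repository): a minimal sketch of the
# out_indices convention shared by the backbones tested in this directory,
# assuming mmdet is installed and that EfficientNet accepts a sparse index
# subset; the helper name is hypothetical.
def _example_backbone_out_indices():
    import torch
    from mmdet.models.backbones import EfficientNet
    model = EfficientNet(arch='b0', out_indices=(2, 3))
    model.eval()
    with torch.no_grad():
        feats = model(torch.randn(1, 3, 64, 64))
    # one feature map per requested index
    assert len(feats) == 2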

================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_backbones/test_hourglass.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch

from mmdet.models.backbones.hourglass import HourglassNet


def test_hourglass_backbone():
    with pytest.raises(AssertionError):
        # HourglassNet's num_stacks should be larger than 0
        HourglassNet(num_stacks=0)

    with pytest.raises(AssertionError):
        # len(stage_channels) should equal len(stage_blocks)
        HourglassNet(
            stage_channels=[256, 256, 384, 384, 384],
            stage_blocks=[2, 2, 2, 2, 2, 4])

    with pytest.raises(AssertionError):
        # len(stage_channels) should be larger than downsample_times
        HourglassNet(
            downsample_times=5,
            stage_channels=[256, 256, 384, 384, 384],
            stage_blocks=[2, 2, 2, 2, 2])

    # Test HourglassNet-52
    model = HourglassNet(
        num_stacks=1,
        stage_channels=(64, 64, 96, 96, 96, 128),
        feat_channel=64)
    model.train()
    imgs = torch.randn(1, 3, 256, 256)
    feat = model(imgs)
    assert len(feat) == 1
    assert feat[0].shape == torch.Size([1, 64, 64, 64])

    # Test HourglassNet-104
    model = HourglassNet(
        num_stacks=2,
        stage_channels=(64, 64, 96, 96, 96, 128),
        feat_channel=64)
    model.train()
    imgs = torch.randn(1, 3, 256, 256)
    feat = model(imgs)
    assert len(feat) == 2
    assert feat[0].shape == torch.Size([1, 64, 64, 64])
    assert feat[1].shape == torch.Size([1, 64, 64, 64])


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_backbones/test_hrnet.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch

from mmdet.models.backbones.hrnet import HRModule, HRNet
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck


@pytest.mark.parametrize('block', [BasicBlock, Bottleneck])
def test_hrmodule(block):
    # Test multiscale forward
    num_channels = (32, 64)
    in_channels = [c * block.expansion for c in num_channels]
    hrmodule = HRModule(
        num_branches=2,
        blocks=block,
        in_channels=in_channels,
        num_blocks=(4, 4),
        num_channels=num_channels,
    )

    feats = [
        torch.randn(1, in_channels[0], 64, 64),
        torch.randn(1, in_channels[1], 32, 32)
    ]
    feats = hrmodule(feats)

    assert len(feats) == 2
    assert feats[0].shape == torch.Size([1, in_channels[0], 64, 64])
    assert feats[1].shape == torch.Size([1, in_channels[1], 32, 32])

    # Test single scale forward
    num_channels = (32, 64)
    in_channels = [c * block.expansion for c in num_channels]
    hrmodule = HRModule(
        num_branches=2,
        blocks=block,
        in_channels=in_channels,
        num_blocks=(4, 4),
        num_channels=num_channels,
        multiscale_output=False,
    )

    feats = [
        torch.randn(1, in_channels[0], 64, 64),
        torch.randn(1, in_channels[1], 32, 32)
    ]
    feats = hrmodule(feats)

    assert len(feats) == 1
    assert feats[0].shape == torch.Size([1, in_channels[0], 64, 64])


def test_hrnet_backbone():
    # only have 3 stages
    extra = dict(
        stage1=dict(
            num_modules=1,
            num_branches=1,
            block='BOTTLENECK',
            num_blocks=(4, ),
            num_channels=(64, )),
        stage2=dict(
            num_modules=1,
            num_branches=2,
            block='BASIC',
            num_blocks=(4, 4),
            num_channels=(32, 64)),
        stage3=dict(
            num_modules=4,
            num_branches=3,
            block='BASIC',
            num_blocks=(4, 4, 4),
            num_channels=(32, 64, 128)))

    with pytest.raises(AssertionError):
        # HRNet currently only supports 4 stages
        HRNet(extra=extra)
    extra['stage4'] = dict(
        num_modules=3,
        num_branches=3,  # should be 4
        block='BASIC',
        num_blocks=(4, 4, 4, 4),
        num_channels=(32, 64, 128, 256))

    with pytest.raises(AssertionError):
        # len(num_blocks) should equal num_branches
        HRNet(extra=extra)

    extra['stage4']['num_branches'] = 4

    # Test hrnetv2p_w32
    model = HRNet(extra=extra)
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 256, 256)
    feats = model(imgs)
    assert len(feats) == 4
    assert feats[0].shape == torch.Size([1, 32, 64, 64])
    assert feats[3].shape == torch.Size([1, 256, 8, 8])

    # Test single scale output
    model = HRNet(extra=extra, multiscale_output=False)
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 256, 256)
    feats = model(imgs)
    assert len(feats) == 1
    assert feats[0].shape == torch.Size([1, 32, 64, 64])
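
# Editor's note (not part of the repository): a self-contained sketch of
# the HRNet stage-config invariant the assertions above exercise; the
# helper name is hypothetical.
def _example_hrnet_stage_cfg():
    stage4 = dict(
        num_modules=3,
        num_branches=4,
        block='BASIC',
        num_blocks=(4, 4, 4, 4),
        num_channels=(32, 64, 128, 256))
    # a well-formed stage keeps branches, blocks and channels aligned
    assert stage4['num_branches'] == len(stage4['num_blocks'])
    assert stage4['num_branches'] == len(stage4['num_channels'])
    return stage4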

================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_backbones/test_mobilenet_v2.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm

from mmdet.models.backbones.mobilenet_v2 import MobileNetV2
from .utils import check_norm_state, is_block, is_norm


def test_mobilenetv2_backbone():
    with pytest.raises(ValueError):
        # frozen_stages must be in range(-1, 8)
        MobileNetV2(frozen_stages=8)

    with pytest.raises(ValueError):
        # out_indices must be in range(-1, 8)
        MobileNetV2(out_indices=[8])

    # Test MobileNetV2 with first stage frozen
    frozen_stages = 1
    model = MobileNetV2(frozen_stages=frozen_stages)
    model.train()

    for mod in model.conv1.modules():
        for param in mod.parameters():
            assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    # Test MobileNetV2 with norm_eval=True
    model = MobileNetV2(norm_eval=True)
    model.train()
    assert check_norm_state(model.modules(), False)

    # Test MobileNetV2 forward with widen_factor=1.0
    model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 8))
    model.train()
    assert check_norm_state(model.modules(), True)

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 8
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
    assert feat[7].shape == torch.Size((1, 1280, 7, 7))

    # Test MobileNetV2 forward with widen_factor=0.5
    model = MobileNetV2(widen_factor=0.5, out_indices=range(0, 7))
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 8, 112, 112))
    assert feat[1].shape == torch.Size((1, 16, 56, 56))
    assert feat[2].shape == torch.Size((1, 16, 28, 28))
    assert feat[3].shape == torch.Size((1, 32, 14, 14))
    assert feat[4].shape == torch.Size((1, 48, 14, 14))
    assert feat[5].shape == torch.Size((1, 80, 7, 7))
    assert feat[6].shape == torch.Size((1, 160, 7, 7))

    # Test MobileNetV2 forward with widen_factor=2.0
    model = MobileNetV2(widen_factor=2.0, out_indices=range(0, 8))
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert feat[0].shape == torch.Size((1, 32, 112, 112))
    assert feat[1].shape == torch.Size((1, 48, 56, 56))
    assert feat[2].shape == torch.Size((1, 64, 28, 28))
    assert feat[3].shape == torch.Size((1, 128, 14, 14))
    assert feat[4].shape == torch.Size((1, 192, 14, 14))
    assert feat[5].shape == torch.Size((1, 320, 7, 7))
    assert feat[6].shape == torch.Size((1, 640, 7, 7))
    assert feat[7].shape == torch.Size((1, 2560, 7, 7))

    # Test MobileNetV2 forward with dict(type='ReLU')
    model = MobileNetV2(
        widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 7))
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))

    # Test MobileNetV2 with BatchNorm forward
    model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 7))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, _BatchNorm)
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))

    # Test MobileNetV2 with GroupNorm forward
    model = MobileNetV2(
        widen_factor=1.0,
        norm_cfg=dict(type='GN', num_groups=2, requires_grad=True),
        out_indices=range(0, 7))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))

    # Test MobileNetV2 with layers 1, 3, 5 out forward
    model = MobileNetV2(widen_factor=1.0, out_indices=(0, 2, 4))
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 3
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 32, 28, 28))
    assert feat[2].shape == torch.Size((1, 96, 14, 14))

    # Test MobileNetV2 with checkpoint forward
    model = MobileNetV2(
        widen_factor=1.0, with_cp=True, out_indices=range(0, 7))
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
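
# Editor's note (not part of the repository): a minimal sketch of the
# frozen_stages contract checked above, assuming mmdet is installed; the
# helper name is hypothetical.
def _example_frozen_stages():
    from mmdet.models.backbones.mobilenet_v2 import MobileNetV2
    model = MobileNetV2(frozen_stages=1)
    model.train()
    # the stem conv and the first stage stay frozen even in train mode
    assert all(not p.requires_grad for p in model.conv1.parameters())
    assert all(not p.requires_grad for p in model.layer1.parameters())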

================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_backbones/test_pvt.py
================================================
import pytest
import torch

from mmdet.models.backbones.pvt import (PVTEncoderLayer,
                                        PyramidVisionTransformer,
                                        PyramidVisionTransformerV2)


def test_pvt_block():
    # test PVT structure and forward
    block = PVTEncoderLayer(
        embed_dims=64, num_heads=4, feedforward_channels=256)
    assert block.ffn.embed_dims == 64
    assert block.attn.num_heads == 4
    assert block.ffn.feedforward_channels == 256
    x = torch.randn(1, 56 * 56, 64)
    x_out = block(x, (56, 56))
    assert x_out.shape == torch.Size([1, 56 * 56, 64])


def test_pvt():
    """Test PVT backbone."""
    with pytest.raises(TypeError):
        # Pretrained arg must be str or None.
        PyramidVisionTransformer(pretrained=123)

    # test pretrained image size
    with pytest.raises(AssertionError):
        PyramidVisionTransformer(pretrain_img_size=(224, 224, 224))

    # Test absolute position embedding
    temp = torch.randn((1, 3, 224, 224))
    model = PyramidVisionTransformer(
        pretrain_img_size=224, use_abs_pos_embed=True)
    model.init_weights()
    model(temp)

    # Test normal inference
    temp = torch.randn((1, 3, 32, 32))
    model = PyramidVisionTransformer()
    outs = model(temp)
    assert outs[0].shape == (1, 64, 8, 8)
    assert outs[1].shape == (1, 128, 4, 4)
    assert outs[2].shape == (1, 320, 2, 2)
    assert outs[3].shape == (1, 512, 1, 1)

    # Test abnormal inference size
    temp = torch.randn((1, 3, 33, 33))
    model = PyramidVisionTransformer()
    outs = model(temp)
    assert outs[0].shape == (1, 64, 8, 8)
    assert outs[1].shape == (1, 128, 4, 4)
    assert outs[2].shape == (1, 320, 2, 2)
    assert outs[3].shape == (1, 512, 1, 1)

    # Test abnormal inference size
    temp = torch.randn((1, 3, 112, 137))
    model = PyramidVisionTransformer()
    outs = model(temp)
    assert outs[0].shape == (1, 64, 28, 34)
    assert outs[1].shape == (1, 128, 14, 17)
    assert outs[2].shape == (1, 320, 7, 8)
    assert outs[3].shape == (1, 512, 3, 4)


def test_pvtv2():
    """Test PVTv2 backbone."""
    with pytest.raises(TypeError):
        # Pretrained arg must be str or None.
        PyramidVisionTransformerV2(pretrained=123)

    # test pretrained image size
    with pytest.raises(AssertionError):
        PyramidVisionTransformerV2(pretrain_img_size=(224, 224, 224))

    # Test normal inference
    temp = torch.randn((1, 3, 32, 32))
    model = PyramidVisionTransformerV2()
    outs = model(temp)
    assert outs[0].shape == (1, 64, 8, 8)
    assert outs[1].shape == (1, 128, 4, 4)
    assert outs[2].shape == (1, 320, 2, 2)
    assert outs[3].shape == (1, 512, 1, 1)

    # Test abnormal inference size
    temp = torch.randn((1, 3, 31, 31))
    model = PyramidVisionTransformerV2()
    outs = model(temp)
    assert outs[0].shape == (1, 64, 8, 8)
    assert outs[1].shape == (1, 128, 4, 4)
    assert outs[2].shape == (1, 320, 2, 2)
    assert outs[3].shape == (1, 512, 1, 1)

    # Test abnormal inference size
    temp = torch.randn((1, 3, 112, 137))
    model = PyramidVisionTransformerV2()
    outs = model(temp)
    assert outs[0].shape == (1, 64, 28, 35)
    assert outs[1].shape == (1, 128, 14, 18)
    assert outs[2].shape == (1, 320, 7, 9)
    assert outs[3].shape == (1, 512, 4, 5)
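
# Editor's note (not part of the repository): a minimal sketch of the PVT
# feature pyramid checked above, assuming mmdet is installed; the helper
# name is hypothetical. Outputs sit at strides 4/8/16/32 with channel
# widths (64, 128, 320, 512).
def _example_pvt_pyramid():
    import torch
    from mmdet.models.backbones.pvt import PyramidVisionTransformer
    model = PyramidVisionTransformer()
    outs = model(torch.randn(1, 3, 64, 64))
    assert [o.shape[1] for o in outs] == [64, 128, 320, 512]
    assert [o.shape[2] for o in outs] == [16, 8, 4, 2]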

================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_backbones/test_regnet.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch

from mmdet.models.backbones import RegNet

regnet_test_data = [
    ('regnetx_400mf',
     dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),
     [32, 64, 160, 384]),
    ('regnetx_800mf',
     dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0),
     [64, 128, 288, 672]),
    ('regnetx_1.6gf',
     dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0),
     [72, 168, 408, 912]),
    ('regnetx_3.2gf',
     dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0),
     [96, 192, 432, 1008]),
    ('regnetx_4.0gf',
     dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0),
     [80, 240, 560, 1360]),
    ('regnetx_6.4gf',
     dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0),
     [168, 392, 784, 1624]),
    ('regnetx_8.0gf',
     dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0),
     [80, 240, 720, 1920]),
    ('regnetx_12gf',
     dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0),
     [224, 448, 896, 2240]),
]


@pytest.mark.parametrize('arch_name,arch,out_channels', regnet_test_data)
def test_regnet_backbone(arch_name, arch, out_channels):
    with pytest.raises(AssertionError):
        # arch, given as a string, must be a predefined RegNet arch name
        RegNet(arch_name + '233')

    # Test RegNet with arch_name
    model = RegNet(arch_name)
    model.train()

    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, out_channels[0], 8, 8])
    assert feat[1].shape == torch.Size([1, out_channels[1], 4, 4])
    assert feat[2].shape == torch.Size([1, out_channels[2], 2, 2])
    assert feat[3].shape == torch.Size([1, out_channels[3], 1, 1])

    # Test RegNet with arch dict
    model = RegNet(arch)
    model.train()

    feat = model(imgs)
    assert feat[0].shape == torch.Size([1, out_channels[0], 8, 8])
    assert feat[1].shape == torch.Size([1, out_channels[1], 4, 4])
    assert feat[2].shape == torch.Size([1, out_channels[2], 2, 2])
    assert feat[3].shape == torch.Size([1, out_channels[3], 1, 1])
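
# Editor's note (not part of the repository): a minimal sketch of the two
# RegNet construction styles the parametrized test above exercises,
# assuming mmdet is installed; the helper name is hypothetical.
def _example_regnet_arch_equivalence():
    import torch
    from mmdet.models.backbones import RegNet
    imgs = torch.randn(1, 3, 32, 32)
    cfg = dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0)
    for arch in ('regnetx_400mf', cfg):
        # name and parameter dict describe the same network
        feat = RegNet(arch)(imgs)
        assert [f.shape[1] for f in feat] == [32, 64, 160, 384]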

================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_backbones/test_renext.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch

from mmdet.models.backbones import ResNeXt
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from .utils import is_block


def test_renext_bottleneck():
    with pytest.raises(AssertionError):
        # Style must be in ['pytorch', 'caffe']
        BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow')

    # Test ResNeXt Bottleneck structure
    block = BottleneckX(
        64, 64, groups=32, base_width=4, stride=2, style='pytorch')
    assert block.conv2.stride == (2, 2)
    assert block.conv2.groups == 32
    assert block.conv2.out_channels == 128

    # Test ResNeXt Bottleneck with DCN
    dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
    with pytest.raises(AssertionError):
        # conv_cfg must be None if dcn is not None
        BottleneckX(
            64,
            64,
            groups=32,
            base_width=4,
            dcn=dcn,
            conv_cfg=dict(type='Conv'))
    BottleneckX(64, 64, dcn=dcn)

    # Test ResNeXt Bottleneck forward
    block = BottleneckX(64, 16, groups=32, base_width=4)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])

    # Test ResNeXt Bottleneck forward with plugins
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            stages=(False, False, True, True),
            position='after_conv2')
    ]
    block = BottleneckX(64, 16, groups=32, base_width=4, plugins=plugins)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])


def test_resnext_backbone():
    with pytest.raises(KeyError):
        # ResNeXt depth should be in [50, 101, 152]
        ResNeXt(depth=18)

    # Test ResNeXt with group 32, base_width 4
    model = ResNeXt(depth=50, groups=32, base_width=4)
    for m in model.modules():
        if is_block(m):
            assert m.conv2.groups == 32
    model.train()

    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 8, 8])
    assert feat[1].shape == torch.Size([1, 512, 4, 4])
    assert feat[2].shape == torch.Size([1, 1024, 2, 2])
    assert feat[3].shape == torch.Size([1, 2048, 1, 1])


regnet_test_data = [
    ('regnetx_400mf',
     dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),
     [32, 64, 160, 384]),
    ('regnetx_800mf',
     dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0),
     [64, 128, 288, 672]),
    ('regnetx_1.6gf',
     dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0),
     [72, 168, 408, 912]),
    ('regnetx_3.2gf',
     dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0),
     [96, 192, 432, 1008]),
    ('regnetx_4.0gf',
     dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0),
     [80, 240, 560, 1360]),
    ('regnetx_6.4gf',
     dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0),
     [168, 392, 784, 1624]),
    ('regnetx_8.0gf',
     dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0),
     [80, 240, 720, 1920]),
    ('regnetx_12gf',
     dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0),
     [224, 448, 896, 2240]),
]
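
# Editor's note (not part of the repository): a minimal sketch of how
# ResNeXt's cardinality surfaces on the built model, assuming mmdet is
# installed; the helper name is hypothetical.
def _example_resnext_grouped_conv():
    from mmdet.models.backbones import ResNeXt
    model = ResNeXt(depth=50, groups=32, base_width=4)
    # cardinality becomes the `groups` of every bottleneck's 3x3 conv
    assert model.layer1[0].conv2.groups == 32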

================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_backbones/test_res2net.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch

from mmdet.models.backbones import Res2Net
from mmdet.models.backbones.res2net import Bottle2neck
from .utils import is_block


def test_res2net_bottle2neck():
    with pytest.raises(AssertionError):
        # Style must be in ['pytorch', 'caffe']
        Bottle2neck(64, 64, base_width=26, scales=4, style='tensorflow')

    with pytest.raises(AssertionError):
        # Scale must be larger than 1
        Bottle2neck(64, 64, base_width=26, scales=1, style='pytorch')

    # Test Res2Net Bottle2neck structure
    block = Bottle2neck(
        64, 64, base_width=26, stride=2, scales=4, style='pytorch')
    assert block.scales == 4

    # Test Res2Net Bottle2neck with DCN
    dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
    with pytest.raises(AssertionError):
        # conv_cfg must be None if dcn is not None
        Bottle2neck(
            64,
            64,
            base_width=26,
            scales=4,
            dcn=dcn,
            conv_cfg=dict(type='Conv'))
    Bottle2neck(64, 64, dcn=dcn)

    # Test Res2Net Bottle2neck forward
    block = Bottle2neck(64, 16, base_width=26, scales=4)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])


def test_res2net_backbone():
    with pytest.raises(KeyError):
        # Res2Net depth should be in [50, 101, 152]
        Res2Net(depth=18)

    # Test Res2Net with scales 4, base_width 26
    model = Res2Net(depth=50, scales=4, base_width=26)
    for m in model.modules():
        if is_block(m):
            assert m.scales == 4
    model.train()

    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 8, 8])
    assert feat[1].shape == torch.Size([1, 512, 4, 4])
    assert feat[2].shape == torch.Size([1, 1024, 2, 2])
    assert feat[3].shape == torch.Size([1, 2048, 1, 1])


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_backbones/test_resnest.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch

from mmdet.models.backbones import ResNeSt
from mmdet.models.backbones.resnest import Bottleneck as BottleneckS


def test_resnest_bottleneck():
    with pytest.raises(AssertionError):
        # Style must be in ['pytorch', 'caffe']
        BottleneckS(64, 64, radix=2, reduction_factor=4, style='tensorflow')

    # Test ResNeSt Bottleneck structure
    block = BottleneckS(
        2, 4, radix=2, reduction_factor=4, stride=2, style='pytorch')
    assert block.avd_layer.stride == 2
    assert block.conv2.channels == 4

    # Test ResNeSt Bottleneck forward
    block = BottleneckS(16, 4, radix=2, reduction_factor=4)
    x = torch.randn(2, 16, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([2, 16, 56, 56])


def test_resnest_backbone():
    with pytest.raises(KeyError):
        # ResNeSt depth should be in [50, 101, 152, 200]
        ResNeSt(depth=18)

    # Test ResNeSt with radix 2, reduction_factor 4
    model = ResNeSt(
        depth=50,
        base_channels=4,
        radix=2,
        reduction_factor=4,
        out_indices=(0, 1, 2, 3))
    model.train()

    imgs = torch.randn(2, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([2, 16, 8, 8])
    assert feat[1].shape == torch.Size([2, 32, 4, 4])
    assert feat[2].shape == torch.Size([2, 64, 2, 2])
    assert feat[3].shape == torch.Size([2, 128, 1, 1])
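
# Editor's note (not part of the repository): a minimal sketch of the
# block attributes these multi-branch backbones expose, assuming mmdet is
# installed; the helper name is hypothetical.
def _example_res2net_scales():
    from mmdet.models.backbones import Res2Net
    model = Res2Net(depth=50, scales=4, base_width=26)
    # every Bottle2neck records its split count, as asserted above
    assert model.layer1[0].scales == 4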
import pytest import torch from mmcv import assert_params_all_zeros from mmcv.ops import DeformConv2dPack from torch.nn.modules import AvgPool2d, GroupNorm from torch.nn.modules.batchnorm import _BatchNorm from mmdet.models.backbones import ResNet, ResNetV1d from mmdet.models.backbones.resnet import BasicBlock, Bottleneck from mmdet.models.utils import ResLayer, SimplifiedBasicBlock from .utils import check_norm_state, is_block, is_norm def test_resnet_basic_block(): with pytest.raises(AssertionError): # Not implemented yet. dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False) BasicBlock(64, 64, dcn=dcn) with pytest.raises(AssertionError): # Not implemented yet. plugins = [ dict( cfg=dict(type='ContextBlock', ratio=1. / 16), position='after_conv3') ] BasicBlock(64, 64, plugins=plugins) with pytest.raises(AssertionError): # Not implemented yet plugins = [ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='0010', kv_stride=2), position='after_conv2') ] BasicBlock(64, 64, plugins=plugins) # test BasicBlock structure and forward block = BasicBlock(64, 64) assert block.conv1.in_channels == 64 assert block.conv1.out_channels == 64 assert block.conv1.kernel_size == (3, 3) assert block.conv2.in_channels == 64 assert block.conv2.out_channels == 64 assert block.conv2.kernel_size == (3, 3) x = torch.randn(1, 64, 56, 56) x_out = block(x) assert x_out.shape == torch.Size([1, 64, 56, 56]) # Test BasicBlock with checkpoint forward block = BasicBlock(64, 64, with_cp=True) assert block.with_cp x = torch.randn(1, 64, 56, 56) x_out = block(x) assert x_out.shape == torch.Size([1, 64, 56, 56]) def test_resnet_bottleneck(): with pytest.raises(AssertionError): # Style must be in ['pytorch', 'caffe'] Bottleneck(64, 64, style='tensorflow') with pytest.raises(AssertionError): # Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3' plugins = [ dict( cfg=dict(type='ContextBlock', ratio=1. / 16), position='after_conv4') ] Bottleneck(64, 16, plugins=plugins) with pytest.raises(AssertionError): # Need to specify different postfix to avoid duplicate plugin name plugins = [ dict( cfg=dict(type='ContextBlock', ratio=1. / 16), position='after_conv3'), dict( cfg=dict(type='ContextBlock', ratio=1. / 16), position='after_conv3') ] Bottleneck(64, 16, plugins=plugins) with pytest.raises(KeyError): # Plugin type is not supported plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')] Bottleneck(64, 16, plugins=plugins) # Test Bottleneck with checkpoint forward block = Bottleneck(64, 16, with_cp=True) assert block.with_cp x = torch.randn(1, 64, 56, 56) x_out = block(x) assert x_out.shape == torch.Size([1, 64, 56, 56]) # Test Bottleneck style block = Bottleneck(64, 64, stride=2, style='pytorch') assert block.conv1.stride == (1, 1) assert block.conv2.stride == (2, 2) block = Bottleneck(64, 64, stride=2, style='caffe') assert block.conv1.stride == (2, 2) assert block.conv2.stride == (1, 1) # Test Bottleneck DCN dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False) with pytest.raises(AssertionError): Bottleneck(64, 64, dcn=dcn, conv_cfg=dict(type='Conv')) block = Bottleneck(64, 64, dcn=dcn) assert isinstance(block.conv2, DeformConv2dPack) # Test Bottleneck forward block = Bottleneck(64, 16) x = torch.randn(1, 64, 56, 56) x_out = block(x) assert x_out.shape == torch.Size([1, 64, 56, 56]) # Test Bottleneck with 1 ContextBlock after conv3 plugins = [ dict( cfg=dict(type='ContextBlock', ratio=1. 
/ 16), position='after_conv3') ] block = Bottleneck(64, 16, plugins=plugins) assert block.context_block.in_channels == 64 x = torch.randn(1, 64, 56, 56) x_out = block(x) assert x_out.shape == torch.Size([1, 64, 56, 56]) # Test Bottleneck with 1 GeneralizedAttention after conv2 plugins = [ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='0010', kv_stride=2), position='after_conv2') ] block = Bottleneck(64, 16, plugins=plugins) assert block.gen_attention_block.in_channels == 16 x = torch.randn(1, 64, 56, 56) x_out = block(x) assert x_out.shape == torch.Size([1, 64, 56, 56]) # Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D # after conv2, 1 ContextBlock after conv3 plugins = [ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='0010', kv_stride=2), position='after_conv2'), dict(cfg=dict(type='NonLocal2d'), position='after_conv2'), dict( cfg=dict(type='ContextBlock', ratio=1. / 16), position='after_conv3') ] block = Bottleneck(64, 16, plugins=plugins) assert block.gen_attention_block.in_channels == 16 assert block.nonlocal_block.in_channels == 16 assert block.context_block.in_channels == 64 x = torch.randn(1, 64, 56, 56) x_out = block(x) assert x_out.shape == torch.Size([1, 64, 56, 56]) # Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after # conv3 plugins = [ dict( cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1), position='after_conv2'), dict( cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2), position='after_conv3'), dict( cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3), position='after_conv3') ] block = Bottleneck(64, 16, plugins=plugins) assert block.context_block1.in_channels == 16 assert block.context_block2.in_channels == 64 assert block.context_block3.in_channels == 64 x = torch.randn(1, 64, 56, 56) x_out = block(x) assert x_out.shape == torch.Size([1, 64, 56, 56]) def test_simplied_basic_block(): with pytest.raises(AssertionError): # Not implemented yet. dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False) SimplifiedBasicBlock(64, 64, dcn=dcn) with pytest.raises(AssertionError): # Not implemented yet. plugins = [ dict( cfg=dict(type='ContextBlock', ratio=1. 
/ 16), position='after_conv3') ] SimplifiedBasicBlock(64, 64, plugins=plugins) with pytest.raises(AssertionError): # Not implemented yet plugins = [ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='0010', kv_stride=2), position='after_conv2') ] SimplifiedBasicBlock(64, 64, plugins=plugins) with pytest.raises(AssertionError): # Not implemented yet SimplifiedBasicBlock(64, 64, with_cp=True) # test SimplifiedBasicBlock structure and forward block = SimplifiedBasicBlock(64, 64) assert block.conv1.in_channels == 64 assert block.conv1.out_channels == 64 assert block.conv1.kernel_size == (3, 3) assert block.conv2.in_channels == 64 assert block.conv2.out_channels == 64 assert block.conv2.kernel_size == (3, 3) x = torch.randn(1, 64, 56, 56) x_out = block(x) assert x_out.shape == torch.Size([1, 64, 56, 56]) # test SimplifiedBasicBlock without norm block = SimplifiedBasicBlock(64, 64, norm_cfg=None) assert block.norm1 is None assert block.norm2 is None x_out = block(x) assert x_out.shape == torch.Size([1, 64, 56, 56]) def test_resnet_res_layer(): # Test ResLayer of 3 Bottleneck w\o downsample layer = ResLayer(Bottleneck, 64, 16, 3) assert len(layer) == 3 assert layer[0].conv1.in_channels == 64 assert layer[0].conv1.out_channels == 16 for i in range(1, len(layer)): assert layer[i].conv1.in_channels == 64 assert layer[i].conv1.out_channels == 16 for i in range(len(layer)): assert layer[i].downsample is None x = torch.randn(1, 64, 56, 56) x_out = layer(x) assert x_out.shape == torch.Size([1, 64, 56, 56]) # Test ResLayer of 3 Bottleneck with downsample layer = ResLayer(Bottleneck, 64, 64, 3) assert layer[0].downsample[0].out_channels == 256 for i in range(1, len(layer)): assert layer[i].downsample is None x = torch.randn(1, 64, 56, 56) x_out = layer(x) assert x_out.shape == torch.Size([1, 256, 56, 56]) # Test ResLayer of 3 Bottleneck with stride=2 layer = ResLayer(Bottleneck, 64, 64, 3, stride=2) assert layer[0].downsample[0].out_channels == 256 assert layer[0].downsample[0].stride == (2, 2) for i in range(1, len(layer)): assert layer[i].downsample is None x = torch.randn(1, 64, 56, 56) x_out = layer(x) assert x_out.shape == torch.Size([1, 256, 28, 28]) # Test ResLayer of 3 Bottleneck with stride=2 and average downsample layer = ResLayer(Bottleneck, 64, 64, 3, stride=2, avg_down=True) assert isinstance(layer[0].downsample[0], AvgPool2d) assert layer[0].downsample[1].out_channels == 256 assert layer[0].downsample[1].stride == (1, 1) for i in range(1, len(layer)): assert layer[i].downsample is None x = torch.randn(1, 64, 56, 56) x_out = layer(x) assert x_out.shape == torch.Size([1, 256, 28, 28]) # Test ResLayer of 3 BasicBlock with stride=2 and downsample_first=False layer = ResLayer(BasicBlock, 64, 64, 3, stride=2, downsample_first=False) assert layer[2].downsample[0].out_channels == 64 assert layer[2].downsample[0].stride == (2, 2) for i in range(len(layer) - 1): assert layer[i].downsample is None x = torch.randn(1, 64, 56, 56) x_out = layer(x) assert x_out.shape == torch.Size([1, 64, 28, 28]) def test_resnest_stem(): # Test default stem_channels model = ResNet(50) assert model.stem_channels == 64 assert model.conv1.out_channels == 64 assert model.norm1.num_features == 64 # Test default stem_channels, with base_channels=3 model = ResNet(50, base_channels=3) assert model.stem_channels == 3 assert model.conv1.out_channels == 3 assert model.norm1.num_features == 3 assert model.layer1[0].conv1.in_channels == 3 # Test stem_channels=3 model = ResNet(50, 
stem_channels=3) assert model.stem_channels == 3 assert model.conv1.out_channels == 3 assert model.norm1.num_features == 3 assert model.layer1[0].conv1.in_channels == 3 # Test stem_channels=3, with base_channels=2 model = ResNet(50, stem_channels=3, base_channels=2) assert model.stem_channels == 3 assert model.conv1.out_channels == 3 assert model.norm1.num_features == 3 assert model.layer1[0].conv1.in_channels == 3 # Test V1d stem_channels model = ResNetV1d(depth=50, stem_channels=6) model.train() assert model.stem[0].out_channels == 3 assert model.stem[1].num_features == 3 assert model.stem[3].out_channels == 3 assert model.stem[4].num_features == 3 assert model.stem[6].out_channels == 6 assert model.stem[7].num_features == 6 assert model.layer1[0].conv1.in_channels == 6 def test_resnet_backbone(): """Test resnet backbone.""" with pytest.raises(KeyError): # ResNet depth should be in [18, 34, 50, 101, 152] ResNet(20) with pytest.raises(AssertionError): # In ResNet: 1 <= num_stages <= 4 ResNet(50, num_stages=0) with pytest.raises(AssertionError): # len(stage_with_dcn) == num_stages dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False) ResNet(50, dcn=dcn, stage_with_dcn=(True, )) with pytest.raises(AssertionError): # len(stage_with_plugin) == num_stages plugins = [ dict( cfg=dict(type='ContextBlock', ratio=1. / 16), stages=(False, True, True), position='after_conv3') ] ResNet(50, plugins=plugins) with pytest.raises(AssertionError): # In ResNet: 1 <= num_stages <= 4 ResNet(50, num_stages=5) with pytest.raises(AssertionError): # len(strides) == len(dilations) == num_stages ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3) with pytest.raises(TypeError): # pretrained must be a string path model = ResNet(50, pretrained=0) with pytest.raises(AssertionError): # Style must be in ['pytorch', 'caffe'] ResNet(50, style='tensorflow') # Test ResNet50 norm_eval=True model = ResNet(50, norm_eval=True, base_channels=1) model.train() assert check_norm_state(model.modules(), False) # Test ResNet50 with torchvision pretrained weight model = ResNet( depth=50, norm_eval=True, pretrained='torchvision://resnet50') model.train() assert check_norm_state(model.modules(), False) # Test ResNet50 with first stage frozen frozen_stages = 1 model = ResNet(50, frozen_stages=frozen_stages, base_channels=1) model.train() assert model.norm1.training is False for layer in [model.conv1, model.norm1]: for param in layer.parameters(): assert param.requires_grad is False for i in range(1, frozen_stages + 1): layer = getattr(model, f'layer{i}') for mod in layer.modules(): if isinstance(mod, _BatchNorm): assert mod.training is False for param in layer.parameters(): assert param.requires_grad is False # Test ResNet50V1d with first stage frozen model = ResNetV1d(depth=50, frozen_stages=frozen_stages, base_channels=2) assert len(model.stem) == 9 model.train() assert check_norm_state(model.stem, False) for param in model.stem.parameters(): assert param.requires_grad is False for i in range(1, frozen_stages + 1): layer = getattr(model, f'layer{i}') for mod in layer.modules(): if isinstance(mod, _BatchNorm): assert mod.training is False for param in layer.parameters(): assert param.requires_grad is False # Test ResNet18 forward model = ResNet(18) model.train() imgs = torch.randn(1, 3, 32, 32) feat = model(imgs) assert len(feat) == 4 assert feat[0].shape == torch.Size([1, 64, 8, 8]) assert feat[1].shape == torch.Size([1, 128, 4, 4]) assert feat[2].shape == torch.Size([1, 256, 2, 2]) assert feat[3].shape == torch.Size([1, 
512, 1, 1]) # Test ResNet18 with checkpoint forward model = ResNet(18, with_cp=True) for m in model.modules(): if is_block(m): assert m.with_cp # Test ResNet50 with BatchNorm forward model = ResNet(50, base_channels=1) for m in model.modules(): if is_norm(m): assert isinstance(m, _BatchNorm) model.train() imgs = torch.randn(1, 3, 32, 32) feat = model(imgs) assert len(feat) == 4 assert feat[0].shape == torch.Size([1, 4, 8, 8]) assert feat[1].shape == torch.Size([1, 8, 4, 4]) assert feat[2].shape == torch.Size([1, 16, 2, 2]) assert feat[3].shape == torch.Size([1, 32, 1, 1]) # Test ResNet50 with layers 1, 2, 3 out forward model = ResNet(50, out_indices=(0, 1, 2), base_channels=1) model.train() imgs = torch.randn(1, 3, 32, 32) feat = model(imgs) assert len(feat) == 3 assert feat[0].shape == torch.Size([1, 4, 8, 8]) assert feat[1].shape == torch.Size([1, 8, 4, 4]) assert feat[2].shape == torch.Size([1, 16, 2, 2]) # Test ResNet50 with checkpoint forward model = ResNet(50, with_cp=True, base_channels=1) for m in model.modules(): if is_block(m): assert m.with_cp model.train() imgs = torch.randn(1, 3, 32, 32) feat = model(imgs) assert len(feat) == 4 assert feat[0].shape == torch.Size([1, 4, 8, 8]) assert feat[1].shape == torch.Size([1, 8, 4, 4]) assert feat[2].shape == torch.Size([1, 16, 2, 2]) assert feat[3].shape == torch.Size([1, 32, 1, 1]) # Test ResNet50 with GroupNorm forward model = ResNet( 50, base_channels=4, norm_cfg=dict(type='GN', num_groups=2, requires_grad=True)) for m in model.modules(): if is_norm(m): assert isinstance(m, GroupNorm) model.train() imgs = torch.randn(1, 3, 32, 32) feat = model(imgs) assert len(feat) == 4 assert feat[0].shape == torch.Size([1, 16, 8, 8]) assert feat[1].shape == torch.Size([1, 32, 4, 4]) assert feat[2].shape == torch.Size([1, 64, 2, 2]) assert feat[3].shape == torch.Size([1, 128, 1, 1]) # Test ResNet50 with 1 GeneralizedAttention after conv2, 1 NonLocal2D # after conv2, 1 ContextBlock after conv3 in layers 2, 3, 4 plugins = [ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='0010', kv_stride=2), stages=(False, True, True, True), position='after_conv2'), dict(cfg=dict(type='NonLocal2d'), position='after_conv2'), dict( cfg=dict(type='ContextBlock', ratio=1. / 16), stages=(False, True, True, False), position='after_conv3') ] model = ResNet(50, plugins=plugins, base_channels=8) for m in model.layer1.modules(): if is_block(m): assert not hasattr(m, 'context_block') assert not hasattr(m, 'gen_attention_block') assert m.nonlocal_block.in_channels == 8 for m in model.layer2.modules(): if is_block(m): assert m.nonlocal_block.in_channels == 16 assert m.gen_attention_block.in_channels == 16 assert m.context_block.in_channels == 64 for m in model.layer3.modules(): if is_block(m): assert m.nonlocal_block.in_channels == 32 assert m.gen_attention_block.in_channels == 32 assert m.context_block.in_channels == 128 for m in model.layer4.modules(): if is_block(m): assert m.nonlocal_block.in_channels == 64 assert m.gen_attention_block.in_channels == 64 assert not hasattr(m, 'context_block') model.train() imgs = torch.randn(1, 3, 32, 32) feat = model(imgs) assert len(feat) == 4 assert feat[0].shape == torch.Size([1, 32, 8, 8]) assert feat[1].shape == torch.Size([1, 64, 4, 4]) assert feat[2].shape == torch.Size([1, 128, 2, 2]) assert feat[3].shape == torch.Size([1, 256, 1, 1]) # Test ResNet50 with 1 ContextBlock after conv2, 1 ContextBlock after # conv3 in layers 2, 3, 4 plugins = [ dict( cfg=dict(type='ContextBlock', ratio=1. 
    # Test ResNet50 with 1 ContextBlock after conv2, 1 ContextBlock after
    # conv3 in layers 2, 3, 4
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
            stages=(False, True, True, False),
            position='after_conv3'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
            stages=(False, True, True, False),
            position='after_conv3')
    ]
    model = ResNet(50, plugins=plugins, base_channels=8)
    for m in model.layer1.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'context_block1')
            assert not hasattr(m, 'context_block2')
    for m in model.layer2.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert m.context_block1.in_channels == 64
            assert m.context_block2.in_channels == 64
    for m in model.layer3.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert m.context_block1.in_channels == 128
            assert m.context_block2.in_channels == 128
    for m in model.layer4.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'context_block1')
            assert not hasattr(m, 'context_block2')
    model.train()

    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 32, 8, 8])
    assert feat[1].shape == torch.Size([1, 64, 4, 4])
    assert feat[2].shape == torch.Size([1, 128, 2, 2])
    assert feat[3].shape == torch.Size([1, 256, 1, 1])

    # Test ResNet50 zero initialization of residual
    model = ResNet(50, zero_init_residual=True, base_channels=1)
    model.init_weights()
    for m in model.modules():
        if isinstance(m, Bottleneck):
            assert assert_params_all_zeros(m.norm3)
        elif isinstance(m, BasicBlock):
            assert assert_params_all_zeros(m.norm2)
    model.train()

    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 4, 8, 8])
    assert feat[1].shape == torch.Size([1, 8, 4, 4])
    assert feat[2].shape == torch.Size([1, 16, 2, 2])
    assert feat[3].shape == torch.Size([1, 32, 1, 1])

    # Test ResNetV1d forward
    model = ResNetV1d(depth=50, base_channels=2)
    model.train()

    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 8, 8, 8])
    assert feat[1].shape == torch.Size([1, 16, 4, 4])
    assert feat[2].shape == torch.Size([1, 32, 2, 2])
    assert feat[3].shape == torch.Size([1, 64, 1, 1])
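A minimal sketch (illustrative only, not a file in this repository) of where the expected shapes in the base_channels=1 checks above come from, assuming the Bottleneck expansion of 4 used by ResNet-50 and the stem's 4x downsampling:

def resnet50_stage_shapes(img_size, base_channels, expansion=4):
    """Hypothetical helper: (channels, H, W) per stage for a square input."""
    shapes = []
    size = img_size // 4  # stem conv + max-pool downsample by 4
    for i in range(4):  # each stage doubles channels, halves resolution
        shapes.append((base_channels * 2**i * expansion, size, size))
        size //= 2
    return shapes

assert resnet50_stage_shapes(32, base_channels=1) == [
    (4, 8, 8), (8, 4, 4), (16, 2, 2), (32, 1, 1)]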
================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_backbones/test_swin.py
================================================
import pytest
import torch

from mmdet.models.backbones.swin import SwinBlock, SwinTransformer


def test_swin_block():
    # test SwinBlock structure and forward
    block = SwinBlock(embed_dims=64, num_heads=4, feedforward_channels=256)
    assert block.ffn.embed_dims == 64
    assert block.attn.w_msa.num_heads == 4
    assert block.ffn.feedforward_channels == 256
    x = torch.randn(1, 56 * 56, 64)
    x_out = block(x, (56, 56))
    assert x_out.shape == torch.Size([1, 56 * 56, 64])

    # Test SwinBlock with checkpoint forward
    block = SwinBlock(
        embed_dims=64, num_heads=4, feedforward_channels=256, with_cp=True)
    assert block.with_cp
    x = torch.randn(1, 56 * 56, 64)
    x_out = block(x, (56, 56))
    assert x_out.shape == torch.Size([1, 56 * 56, 64])


def test_swin_transformer():
    """Test Swin Transformer backbone."""
    with pytest.raises(TypeError):
        # Pretrained arg must be str or None.
        SwinTransformer(pretrained=123)

    with pytest.raises(AssertionError):
        # Because Swin uses non-overlapping patch embedding, the stride of
        # the patch embedding must equal the patch size.
        SwinTransformer(strides=(2, 2, 2, 2), patch_size=4)

    # test pretrained image size
    with pytest.raises(AssertionError):
        SwinTransformer(pretrain_img_size=(224, 224, 224))

    # Test absolute position embedding
    temp = torch.randn((1, 3, 224, 224))
    model = SwinTransformer(pretrain_img_size=224, use_abs_pos_embed=True)
    model.init_weights()
    model(temp)

    # Test different inputs when use absolute position embedding
    temp = torch.randn((1, 3, 112, 112))
    model(temp)
    temp = torch.randn((1, 3, 256, 256))
    model(temp)

    # Test patch norm
    model = SwinTransformer(patch_norm=False)
    model(temp)

    # Test normal inference
    temp = torch.randn((1, 3, 32, 32))
    model = SwinTransformer()
    outs = model(temp)
    assert outs[0].shape == (1, 96, 8, 8)
    assert outs[1].shape == (1, 192, 4, 4)
    assert outs[2].shape == (1, 384, 2, 2)
    assert outs[3].shape == (1, 768, 1, 1)

    # Test abnormal inference size
    temp = torch.randn((1, 3, 31, 31))
    model = SwinTransformer()
    outs = model(temp)
    assert outs[0].shape == (1, 96, 8, 8)
    assert outs[1].shape == (1, 192, 4, 4)
    assert outs[2].shape == (1, 384, 2, 2)
    assert outs[3].shape == (1, 768, 1, 1)

    # Test abnormal inference size
    temp = torch.randn((1, 3, 112, 137))
    model = SwinTransformer()
    outs = model(temp)
    assert outs[0].shape == (1, 96, 28, 35)
    assert outs[1].shape == (1, 192, 14, 18)
    assert outs[2].shape == (1, 384, 7, 9)
    assert outs[3].shape == (1, 768, 4, 5)

    model = SwinTransformer(frozen_stages=4)
    model.train()
    for p in model.parameters():
        assert not p.requires_grad
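The "abnormal inference size" expectations above follow from ceiling-division downsampling: the backbone pads inputs so each stage halves the spatial dims rounding up, which the asserts themselves confirm. A standalone sketch (illustrative only, not repository code):

import math

def swin_stage_shapes(h, w, patch_size=4, num_stages=4):
    """Hypothetical helper: per-stage (H, W), assuming ceil-div halving."""
    h, w = math.ceil(h / patch_size), math.ceil(w / patch_size)
    shapes = [(h, w)]
    for _ in range(num_stages - 1):
        h, w = math.ceil(h / 2), math.ceil(w / 2)
        shapes.append((h, w))
    return shapes

assert swin_stage_shapes(31, 31) == [(8, 8), (4, 4), (2, 2), (1, 1)]
assert swin_stage_shapes(112, 137) == [(28, 35), (14, 18), (7, 9), (4, 5)]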
================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_backbones/test_trident_resnet.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch

from mmdet.models.backbones import TridentResNet
from mmdet.models.backbones.trident_resnet import TridentBottleneck


def test_trident_resnet_bottleneck():
    trident_dilations = (1, 2, 3)
    test_branch_idx = 1
    concat_output = True
    trident_build_config = (trident_dilations, test_branch_idx, concat_output)

    with pytest.raises(AssertionError):
        # Style must be in ['pytorch', 'caffe']
        TridentBottleneck(
            *trident_build_config, inplanes=64, planes=64, style='tensorflow')

    with pytest.raises(AssertionError):
        # Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv4')
        ]
        TridentBottleneck(
            *trident_build_config, inplanes=64, planes=16, plugins=plugins)

    with pytest.raises(AssertionError):
        # Need to specify different postfix to avoid duplicate plugin name
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3'),
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3')
        ]
        TridentBottleneck(
            *trident_build_config, inplanes=64, planes=16, plugins=plugins)

    with pytest.raises(KeyError):
        # Plugin type is not supported
        plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
        TridentBottleneck(
            *trident_build_config, inplanes=64, planes=16, plugins=plugins)

    # Test Bottleneck with checkpoint forward
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=16, with_cp=True)
    assert block.with_cp
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])

    # Test Bottleneck style
    block = TridentBottleneck(
        *trident_build_config,
        inplanes=64,
        planes=64,
        stride=2,
        style='pytorch')
    assert block.conv1.stride == (1, 1)
    assert block.conv2.stride == (2, 2)
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=64, stride=2, style='caffe')
    assert block.conv1.stride == (2, 2)
    assert block.conv2.stride == (1, 1)

    # Test Bottleneck forward
    block = TridentBottleneck(*trident_build_config, inplanes=64, planes=16)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])

    # Test Bottleneck with 1 ContextBlock after conv3
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            position='after_conv3')
    ]
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    assert block.context_block.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])

    # Test Bottleneck with 1 GeneralizedAttention after conv2
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            position='after_conv2')
    ]
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    assert block.gen_attention_block.in_channels == 16
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])

    # Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D
    # after conv2, 1 ContextBlock after conv3
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            position='after_conv2'),
        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            position='after_conv3')
    ]
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    assert block.gen_attention_block.in_channels == 16
    assert block.nonlocal_block.in_channels == 16
    assert block.context_block.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
    # Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after
    # conv3
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
            position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
            position='after_conv3'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3),
            position='after_conv3')
    ]
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    assert block.context_block1.in_channels == 16
    assert block.context_block2.in_channels == 64
    assert block.context_block3.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])


def test_trident_resnet_backbone():
    """Test tridentresnet backbone."""
    tridentresnet_config = dict(
        num_branch=3,
        test_branch_idx=1,
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        trident_dilations=(1, 2, 3),
        out_indices=(2, ),
    )

    with pytest.raises(AssertionError):
        # TridentResNet depth should be in [50, 101, 152]
        TridentResNet(18, **tridentresnet_config)

    with pytest.raises(AssertionError):
        # In TridentResNet: num_stages == 3
        TridentResNet(50, num_stages=4, **tridentresnet_config)

    model = TridentResNet(50, num_stages=3, **tridentresnet_config)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 1
    assert feat[0].shape == torch.Size([3, 1024, 2, 2])


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_backbones/utils.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm

from mmdet.models.backbones.res2net import Bottle2neck
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from mmdet.models.utils import SimplifiedBasicBlock


def is_block(modules):
    """Check whether the module is a ResNet building block."""
    if isinstance(modules, (BasicBlock, Bottleneck, BottleneckX, Bottle2neck,
                            SimplifiedBasicBlock)):
        return True
    return False


def is_norm(modules):
    """Check whether the module is one of the supported norm layers."""
    if isinstance(modules, (GroupNorm, _BatchNorm)):
        return True
    return False


def check_norm_state(modules, train_state):
    """Check whether every BatchNorm layer is in the expected train state."""
    for mod in modules:
        if isinstance(mod, _BatchNorm):
            if mod.training != train_state:
                return False
    return True
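A short usage sketch for the helpers above (illustrative only, not repository code), using plain torch modules:

import torch.nn as nn

seq = nn.Sequential(nn.Conv2d(3, 8, 1), nn.BatchNorm2d(8))
assert is_norm(seq[1]) and not is_norm(seq[0])
seq.eval()
assert check_norm_state(seq.modules(), train_state=False)
seq.train()
assert check_norm_state(seq.modules(), train_state=True)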
================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_anchor_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch

from mmdet.models.dense_heads import AnchorHead


def test_anchor_head_loss():
    """Tests anchor head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]

    cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False))
    self = AnchorHead(num_classes=4, in_channels=1, train_cfg=cfg)

    # Anchor head expects multiple levels of features per image
    feat = [
        torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
        for i in range(len(self.anchor_generator.strides))
    ]
    cls_scores, bbox_preds = self.forward(feat)

    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = sum(empty_gt_losses['loss_cls'])
    empty_box_loss = sum(empty_gt_losses['loss_bbox'])
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')

    # When truth is non-empty then both cls and box loss should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                              img_metas, gt_bboxes_ignore)
    onegt_cls_loss = sum(one_gt_losses['loss_cls'])
    onegt_box_loss = sum(one_gt_losses['loss_bbox'])
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
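The empty-GT / one-GT pattern in this test recurs across nearly every dense-head test that follows. A hypothetical helper distilling the empty-GT half (illustrative only; the suite intentionally keeps each test self-contained):

def check_empty_gt_losses(losses, cls_key='loss_cls', box_key='loss_bbox'):
    """With no GT boxes: cls loss must be positive, box loss exactly zero.

    Assumes per-level loss lists, as returned by multi-level anchor heads.
    """
    assert sum(losses[cls_key]).item() > 0, 'cls loss should be non-zero'
    assert sum(losses[box_key]).item() == 0, (
        'there should be no box loss when there are no true boxes')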
================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_atss_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch

from mmdet.models.dense_heads import ATSSHead


def test_atss_head_loss():
    """Tests atss head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(type='ATSSAssigner', topk=9),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    self = ATSSHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    cls_scores, bbox_preds, centernesses = self.forward(feat)

    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, centernesses,
                                gt_bboxes, gt_labels, img_metas,
                                gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = sum(empty_gt_losses['loss_cls'])
    empty_box_loss = sum(empty_gt_losses['loss_bbox'])
    empty_centerness_loss = sum(empty_gt_losses['loss_centerness'])
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')
    assert empty_centerness_loss.item() == 0, (
        'there should be no centerness loss when there are no true boxes')

    # When truth is non-empty then both cls and box loss should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, centernesses, gt_bboxes,
                              gt_labels, img_metas, gt_bboxes_ignore)
    onegt_cls_loss = sum(one_gt_losses['loss_cls'])
    onegt_box_loss = sum(one_gt_losses['loss_bbox'])
    onegt_centerness_loss = sum(one_gt_losses['loss_centerness'])
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
    assert onegt_centerness_loss.item() > 0, (
        'centerness loss should be non-zero')


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_autoassign_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch

from mmdet.models.dense_heads.autoassign_head import AutoAssignHead
from mmdet.models.dense_heads.paa_head import levels_to_images


def test_autoassign_head_loss():
    """Tests autoassign head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(assigner=None, allowed_border=-1, pos_weight=-1, debug=False))
    self = AutoAssignHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.3))
    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    self.init_weights()
    cls_scores, bbox_preds, objectnesses = self(feat)

    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
                                gt_bboxes, gt_labels, img_metas,
                                gt_bboxes_ignore)
    # When there is no truth, the negative (cls) loss should be nonzero but
    # there should be no positive or center loss.
    empty_pos_loss = empty_gt_losses['loss_pos']
    empty_neg_loss = empty_gt_losses['loss_neg']
    empty_center_loss = empty_gt_losses['loss_center']
    assert empty_neg_loss.item() > 0, 'neg loss should be non-zero'
    assert empty_pos_loss.item() == 0, (
        'there should be no pos loss when there are no true boxes')
    assert empty_center_loss.item() == 0, (
        'there should be no center loss when there are no true boxes')

    # When truth is non-empty then all three losses should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses, gt_bboxes,
                              gt_labels, img_metas, gt_bboxes_ignore)
    onegt_pos_loss = one_gt_losses['loss_pos']
    onegt_neg_loss = one_gt_losses['loss_neg']
    onegt_center_loss = one_gt_losses['loss_center']
    assert onegt_pos_loss.item() > 0, 'pos loss should be non-zero'
    assert onegt_neg_loss.item() > 0, 'neg loss should be non-zero'
    assert onegt_center_loss.item() > 0, 'center loss should be non-zero'

    n, c, h, w = 10, 4, 20, 20
    mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
    results = levels_to_images(mlvl_tensor)
    assert len(results) == n
    assert results[0].size() == (h * w * 5, c)

    self = AutoAssignHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
        strides=(4, ))
    cls_scores = [torch.ones(2, 4, 5, 5)]
    bbox_preds = [torch.ones(2, 4, 5, 5)]
    iou_preds = [torch.ones(2, 1, 5, 5)]
    cfg = mmcv.Config(
        dict(
            nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.6),
            max_per_img=100))
    rescale = False
    self.get_bboxes(
        cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
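The levels_to_images call above converts a per-level list of (N, C, H, W) tensors into a per-image list of (sum of H*W over levels, C) tensors, exactly as the shape asserts in the test confirm. A shape-only sketch (illustrative, not repository code):

import torch
from mmdet.models.dense_heads.paa_head import levels_to_images

n, c, h, w = 2, 4, 3, 3
mlvl = [torch.zeros(n, c, h, w) for _ in range(5)]  # 5 pyramid levels
per_img = levels_to_images(mlvl)
assert len(per_img) == n                   # one tensor per image
assert per_img[0].shape == (h * w * 5, c)  # locations flattened, C last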
================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_centernet_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmcv import ConfigDict

from mmdet.models.dense_heads import CenterNetHead


def test_center_head_loss():
    """Tests center head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    test_cfg = dict(topK=100, max_per_img=100)
    self = CenterNetHead(
        num_classes=4, in_channel=1, feat_channel=4, test_cfg=test_cfg)

    feat = [torch.rand(1, 1, s, s)]
    center_out, wh_out, offset_out = self.forward(feat)
    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(center_out, wh_out, offset_out, gt_bboxes,
                                gt_labels, img_metas, gt_bboxes_ignore)
    loss_center = empty_gt_losses['loss_center_heatmap']
    loss_wh = empty_gt_losses['loss_wh']
    loss_offset = empty_gt_losses['loss_offset']
    assert loss_center.item() > 0, 'loss_center should be non-zero'
    assert loss_wh.item() == 0, (
        'there should be no loss_wh when there are no true boxes')
    assert loss_offset.item() == 0, (
        'there should be no loss_offset when there are no true boxes')

    # When truth is non-empty then all three losses should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(center_out, wh_out, offset_out, gt_bboxes,
                              gt_labels, img_metas, gt_bboxes_ignore)
    loss_center = one_gt_losses['loss_center_heatmap']
    loss_wh = one_gt_losses['loss_wh']
    loss_offset = one_gt_losses['loss_offset']
    assert loss_center.item() > 0, 'loss_center should be non-zero'
    assert loss_wh.item() > 0, 'loss_wh should be non-zero'
    assert loss_offset.item() > 0, 'loss_offset should be non-zero'


def test_centernet_head_get_bboxes():
    """Tests center head generating and decoding the heatmap."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': np.array([1., 1., 1., 1.]),
        'pad_shape': (s, s, 3),
        'batch_input_shape': (s, s),
        'border': (0, 0, 0, 0),
        'flip': False
    }]
    test_cfg = ConfigDict(
        dict(topk=100, local_maximum_kernel=3, max_per_img=100))
    gt_bboxes = [
        torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],
                      [10, 20, 100, 240]])
    ]
    gt_labels = [torch.LongTensor([1, 1, 2])]

    self = CenterNetHead(
        num_classes=4, in_channel=1, feat_channel=4, test_cfg=test_cfg)
    self.feat_shape = (1, 1, s // 4, s // 4)
    targets, _ = self.get_targets(gt_bboxes, gt_labels, self.feat_shape,
                                  img_metas[0]['pad_shape'])
    center_target = targets['center_heatmap_target']
    wh_target = targets['wh_target']
    offset_target = targets['offset_target']
    # make sure the targets are assigned correctly
    for i in range(len(gt_bboxes[0])):
        bbox, label = gt_bboxes[0][i] / 4, gt_labels[0][i]
        ctx, cty = sum(bbox[0::2]) / 2, sum(bbox[1::2]) / 2
        int_ctx, int_cty = int(sum(bbox[0::2]) / 2), int(sum(bbox[1::2]) / 2)
        w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]
        x_off = ctx - int(ctx)
        y_off = cty - int(cty)
        assert center_target[0, label, int_cty, int_ctx] == 1
        assert wh_target[0, 0, int_cty, int_ctx] == w
        assert wh_target[0, 1, int_cty, int_ctx] == h
        assert offset_target[0, 0, int_cty, int_ctx] == x_off
        assert offset_target[0, 1, int_cty, int_ctx] == y_off

    # make sure get_bboxes is right
    detections = self.get_bboxes([center_target], [wh_target],
                                 [offset_target],
                                 img_metas,
                                 rescale=True,
                                 with_nms=False)
    out_bboxes = detections[0][0][:3]
    out_clses = detections[0][1][:3]
    for bbox, cls in zip(out_bboxes, out_clses):
        flag = False
        for gt_bbox, gt_cls in zip(gt_bboxes[0], gt_labels[0]):
            if (bbox[:4] == gt_bbox[:4]).all():
                flag = True
        assert flag, 'get_bboxes is wrong'
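A worked example (illustrative only) of the stride-4 target arithmetic checked in the loop above, for the first gt box [10, 20, 200, 240]:

# On the stride-4 feature map the box becomes [2.5, 5.0, 50.0, 60.0].
bbox = [v / 4 for v in (10, 20, 200, 240)]
ctx, cty = (bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2  # 26.25, 32.5
assert (int(ctx), int(cty)) == (26, 32)                 # heatmap peak cell
assert (ctx - int(ctx), cty - int(cty)) == (0.25, 0.5)  # offset target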
================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_corner_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from mmdet.models.dense_heads import CornerHead


def test_corner_head_loss():
    """Tests corner head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]

    self = CornerHead(num_classes=4, in_channels=1)

    # Corner head expects multiple levels of features per image
    feat = [
        torch.rand(1, 1, s // 4, s // 4) for _ in range(self.num_feat_levels)
    ]
    tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs = self.forward(feat)

    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(tl_heats, br_heats, tl_embs, br_embs, tl_offs,
                                br_offs, gt_bboxes, gt_labels, img_metas,
                                gt_bboxes_ignore)
    empty_det_loss = sum(empty_gt_losses['det_loss'])
    empty_push_loss = sum(empty_gt_losses['push_loss'])
    empty_pull_loss = sum(empty_gt_losses['pull_loss'])
    empty_off_loss = sum(empty_gt_losses['off_loss'])
    assert empty_det_loss.item() > 0, 'det loss should be non-zero'
    assert empty_push_loss.item() == 0, (
        'there should be no push loss when there are no true boxes')
    assert empty_pull_loss.item() == 0, (
        'there should be no pull loss when there are no true boxes')
    assert empty_off_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')

    # When truth is non-empty then both cls and box loss should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(tl_heats, br_heats, tl_embs, br_embs, tl_offs,
                              br_offs, gt_bboxes, gt_labels, img_metas,
                              gt_bboxes_ignore)
    onegt_det_loss = sum(one_gt_losses['det_loss'])
    onegt_push_loss = sum(one_gt_losses['push_loss'])
    onegt_pull_loss = sum(one_gt_losses['pull_loss'])
    onegt_off_loss = sum(one_gt_losses['off_loss'])
    assert onegt_det_loss.item() > 0, 'det loss should be non-zero'
    assert onegt_push_loss.item() == 0, (
        'there should be no push loss when there is only one true box')
    assert onegt_pull_loss.item() > 0, 'pull loss should be non-zero'
    assert onegt_off_loss.item() > 0, 'off loss should be non-zero'

    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874],
                      [123.6667, 123.8757, 138.6326, 251.8874]]),
    ]
    gt_labels = [torch.LongTensor([2, 3])]

    # equalize the corners' embedding value of different objects to make the
    # push_loss larger than 0
    gt_bboxes_ind = (gt_bboxes[0] // 4).int().tolist()
    for tl_emb_feat, br_emb_feat in zip(tl_embs, br_embs):
        tl_emb_feat[:, :, gt_bboxes_ind[0][1],
                    gt_bboxes_ind[0][0]] = tl_emb_feat[:, :,
                                                       gt_bboxes_ind[1][1],
                                                       gt_bboxes_ind[1][0]]
        br_emb_feat[:, :, gt_bboxes_ind[0][3],
                    gt_bboxes_ind[0][2]] = br_emb_feat[:, :,
                                                       gt_bboxes_ind[1][3],
                                                       gt_bboxes_ind[1][2]]
    two_gt_losses = self.loss(tl_heats, br_heats, tl_embs, br_embs, tl_offs,
                              br_offs, gt_bboxes, gt_labels, img_metas,
                              gt_bboxes_ignore)
    twogt_det_loss = sum(two_gt_losses['det_loss'])
    twogt_push_loss = sum(two_gt_losses['push_loss'])
    twogt_pull_loss = sum(two_gt_losses['pull_loss'])
    twogt_off_loss = sum(two_gt_losses['off_loss'])
    assert twogt_det_loss.item() > 0, 'det loss should be non-zero'
    assert twogt_push_loss.item() > 0, 'push loss should be non-zero'
    assert twogt_pull_loss.item() > 0, 'pull loss should be non-zero'
    assert twogt_off_loss.item() > 0, 'off loss should be non-zero'


def test_corner_head_encode_and_decode_heatmap():
    """Tests corner head generating and decoding the heatmap."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3),
        'border': (0, 0, 0, 0)
    }]

    gt_bboxes = [
        torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],
                      [10, 20, 200, 240]])
    ]
    gt_labels = [torch.LongTensor([1, 1, 2])]

    self = CornerHead(num_classes=4, in_channels=1, corner_emb_channels=1)

    feat = [
        torch.rand(1, 1, s // 4, s // 4) for _ in range(self.num_feat_levels)
    ]

    targets = self.get_targets(
        gt_bboxes,
        gt_labels,
        feat[0].shape,
        img_metas[0]['pad_shape'],
        with_corner_emb=self.with_corner_emb)
    gt_tl_heatmap = targets['topleft_heatmap']
    gt_br_heatmap = targets['bottomright_heatmap']
    gt_tl_offset = targets['topleft_offset']
    gt_br_offset = targets['bottomright_offset']
    embedding = targets['corner_embedding']
    [top, left], [bottom, right] = embedding[0][0]
    gt_tl_embedding_heatmap = torch.zeros([1, 1, s // 4, s // 4])
    gt_br_embedding_heatmap = torch.zeros([1, 1, s // 4, s // 4])
    gt_tl_embedding_heatmap[0, 0, top, left] = 1
    gt_br_embedding_heatmap[0, 0, bottom, right] = 1

    batch_bboxes, batch_scores, batch_clses = self.decode_heatmap(
        tl_heat=gt_tl_heatmap,
        br_heat=gt_br_heatmap,
        tl_off=gt_tl_offset,
        br_off=gt_br_offset,
        tl_emb=gt_tl_embedding_heatmap,
        br_emb=gt_br_embedding_heatmap,
        img_meta=img_metas[0],
        k=100,
        kernel=3,
        distance_threshold=0.5)

    bboxes = batch_bboxes.view(-1, 4)
    scores = batch_scores.view(-1, 1)
    clses = batch_clses.view(-1, 1)

    idx = scores.argsort(dim=0, descending=True)
    bboxes = bboxes[idx].view(-1, 4)
    scores = scores[idx].view(-1)
    clses = clses[idx].view(-1)

    valid_bboxes = bboxes[torch.where(scores > 0.05)]
    valid_labels = clses[torch.where(scores > 0.05)]
    max_coordinate = valid_bboxes.max()
    offsets = valid_labels.to(valid_bboxes) * (max_coordinate + 1)
    gt_offsets = gt_labels[0].to(gt_bboxes[0]) * (max_coordinate + 1)

    offset_bboxes = valid_bboxes + offsets[:, None]
    offset_gtbboxes = gt_bboxes[0] + gt_offsets[:, None]

    iou_matrix = bbox_overlaps(offset_bboxes.numpy(), offset_gtbboxes.numpy())
    assert (iou_matrix == 1).sum() == 3
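The label-offset trick above shifts boxes of different classes into disjoint coordinate ranges, so plain IoU matching only pairs same-class boxes. A minimal illustration (hypothetical values, not repository code):

import numpy as np
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps

boxes = np.array([[0., 0., 10., 10.]])
assert bbox_overlaps(boxes, boxes)[0, 0] == 1.0          # same box: IoU 1
assert bbox_overlaps(boxes, boxes + 100.0)[0, 0] == 0.0  # shifted: disjoint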
================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_ddod_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch

from mmdet.models.dense_heads import DDODHead


def test_ddod_head_loss():
    """Tests ddod head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            # ATSSAssigner
            assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8),
            reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    self = DDODHead(
        num_classes=4,
        in_channels=1,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        train_cfg=train_cfg,
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
        loss_iou=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    cls_scores, bbox_preds, iou_preds = self.forward(feat)

    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
                                gt_labels, img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = sum(empty_gt_losses['loss_cls'])
    empty_box_loss = sum(empty_gt_losses['loss_bbox'])
    empty_iou_loss = sum(empty_gt_losses['loss_iou'])
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')
    assert empty_iou_loss.item() == 0, (
        'there should be no iou loss when there are no true boxes')

    # When truth is non-empty then both cls and box loss should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
                              gt_labels, img_metas, gt_bboxes_ignore)
    onegt_cls_loss = sum(one_gt_losses['loss_cls'])
    onegt_box_loss = sum(one_gt_losses['loss_bbox'])
    onegt_iou_loss = sum(one_gt_losses['loss_iou'])
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
    assert onegt_iou_loss.item() > 0, 'iou loss should be non-zero'
================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_dense_heads_attr.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

from terminaltables import AsciiTable

from mmdet.models import dense_heads
from mmdet.models.dense_heads import *  # noqa: F401,F403


def test_dense_heads_test_attr():
    """Tests inference methods such as simple_test and aug_test."""
    # make list of dense heads
    exceptions = ['FeatureAdaption']  # module used in head
    all_dense_heads = [m for m in dense_heads.__all__ if m not in exceptions]

    # search attributes
    check_attributes = [
        'simple_test', 'aug_test', 'simple_test_bboxes', 'simple_test_rpn',
        'aug_test_rpn'
    ]
    table_header = ['head name'] + check_attributes
    table_data = [table_header]
    not_found = {k: [] for k in check_attributes}
    for target_head_name in all_dense_heads:
        target_head = globals()[target_head_name]
        target_head_attributes = dir(target_head)
        check_results = [target_head_name]
        for check_attribute in check_attributes:
            found = check_attribute in target_head_attributes
            check_results.append(found)
            if not found:
                not_found[check_attribute].append(target_head_name)
        table_data.append(check_results)
    table = AsciiTable(table_data)
    print()
    print(table.table)

    # NOTE: this test just checks attributes.
    # simple_test of RPN heads will not work now.
    assert len(not_found['simple_test']) == 0, \
        f'simple_test not found in {not_found["simple_test"]}'
    if len(not_found['aug_test']) != 0:
        warnings.warn(f'aug_test not found in {not_found["aug_test"]}. '
                      'Please implement it or raise NotImplementedError.')


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_detr_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv import ConfigDict

from mmdet.models.dense_heads import DETRHead


def test_detr_head_loss():
    """Tests transformer head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3),
        'batch_input_shape': (s, s)
    }]
    config = ConfigDict(
        dict(
            type='DETRHead',
            num_classes=80,
            in_channels=200,
            transformer=dict(
                type='Transformer',
                encoder=dict(
                    type='DetrTransformerEncoder',
                    num_layers=6,
                    transformerlayers=dict(
                        type='BaseTransformerLayer',
                        attn_cfgs=[
                            dict(
                                type='MultiheadAttention',
                                embed_dims=256,
                                num_heads=8,
                                dropout=0.1)
                        ],
                        feedforward_channels=2048,
                        ffn_dropout=0.1,
                        operation_order=('self_attn', 'norm', 'ffn',
                                         'norm'))),
                decoder=dict(
                    type='DetrTransformerDecoder',
                    return_intermediate=True,
                    num_layers=6,
                    transformerlayers=dict(
                        type='DetrTransformerDecoderLayer',
                        attn_cfgs=dict(
                            type='MultiheadAttention',
                            embed_dims=256,
                            num_heads=8,
                            dropout=0.1),
                        feedforward_channels=2048,
                        ffn_dropout=0.1,
                        operation_order=('self_attn', 'norm', 'cross_attn',
                                         'norm', 'ffn', 'norm')),
                )),
            positional_encoding=dict(
                type='SinePositionalEncoding', num_feats=128, normalize=True),
            loss_cls=dict(
                type='CrossEntropyLoss',
                bg_cls_weight=0.1,
                use_sigmoid=False,
                loss_weight=1.0,
                class_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=5.0),
            loss_iou=dict(type='GIoULoss', loss_weight=2.0)))
    self = DETRHead(**config)
    self.init_weights()
    feat = [torch.rand(1, 200, 10, 10)]
    cls_scores, bbox_preds = self.forward(feat, img_metas)
    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    for key, loss in empty_gt_losses.items():
        if 'cls' in key:
            assert loss.item() > 0, 'cls loss should be non-zero'
        elif 'bbox' in key:
            assert loss.item(
            ) == 0, 'there should be no box loss when there are no true boxes'
        elif 'iou' in key:
            assert loss.item(
            ) == 0, 'there should be no iou loss when there are no true boxes'

    # When truth is non-empty then both cls and box loss should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                              img_metas, gt_bboxes_ignore)
    for loss in one_gt_losses.values():
        assert loss.item(
        ) > 0, 'cls loss, or box loss, or iou loss should be non-zero'

    # test forward_train
    self.forward_train(feat, img_metas, gt_bboxes, gt_labels)

    # test inference mode
    self.get_bboxes(cls_scores, bbox_preds, img_metas, rescale=True)


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_fcos_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch

from mmdet.models.dense_heads import FCOSHead


def test_fcos_head_loss():
    """Tests fcos head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0,
                ignore_iof_thr=-1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    # since Focal Loss is not supported on CPU
    self = FCOSHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    cls_scores, bbox_preds, centerness = self.forward(feat)
    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, centerness, gt_bboxes,
                                gt_labels, img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = empty_gt_losses['loss_cls']
    empty_box_loss = empty_gt_losses['loss_bbox']
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')

    # When truth is non-empty then both cls and box loss should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, centerness, gt_bboxes,
                              gt_labels, img_metas, gt_bboxes_ignore)
    onegt_cls_loss = one_gt_losses['loss_cls']
    onegt_box_loss = one_gt_losses['loss_bbox']
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_fsaf_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch

from mmdet.models.dense_heads import FSAFHead


def test_fsaf_head_loss():
    """Tests fsaf head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]

    cfg = dict(
        reg_decoded_bbox=True,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=1,
            scales_per_octave=1,
            ratios=[1.0],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(type='TBLRBBoxCoder', normalizer=4.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0,
            reduction='none'),
        loss_bbox=dict(
            type='IoULoss', eps=1e-6, loss_weight=1.0, reduction='none'))

    train_cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='CenterRegionAssigner',
                pos_scale=0.2,
                neg_scale=0.2,
                min_pos_iof=0.01),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    head = FSAFHead(num_classes=4, in_channels=1, train_cfg=train_cfg, **cfg)
    if torch.cuda.is_available():
        head.cuda()
        # FSAF head expects multiple levels of features per image
        feat = [
            torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
            for i in range(len(head.anchor_generator.strides))
        ]
        cls_scores, bbox_preds = head.forward(feat)
        gt_bboxes_ignore = None

        # When truth is non-empty then both cls and box loss should be
        # nonzero for random inputs
        gt_bboxes = [
            torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
        ]
        gt_labels = [torch.LongTensor([2]).cuda()]
        one_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes,
                                  gt_labels, img_metas, gt_bboxes_ignore)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert onegt_box_loss.item() > 0, 'box loss should be non-zero'

        # Test that empty ground truth encourages the network to predict bkg
        gt_bboxes = [torch.empty((0, 4)).cuda()]
        gt_labels = [torch.LongTensor([]).cuda()]

        empty_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes,
                                    gt_labels, img_metas, gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert empty_box_loss.item() == 0, (
            'there should be no box loss when there are no true boxes')
================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_ga_anchor_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch

from mmdet.models.dense_heads import GuidedAnchorHead


def test_ga_anchor_head_loss():
    """Tests anchor head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]

    cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            ga_assigner=dict(
                type='ApproxMaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            ga_sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            center_ratio=0.2,
            ignore_ratio=0.5,
            pos_weight=-1,
            debug=False))
    head = GuidedAnchorHead(num_classes=4, in_channels=4, train_cfg=cfg)

    # Anchor head expects multiple levels of features per image
    if torch.cuda.is_available():
        head.cuda()
        feat = [
            torch.rand(1, 4, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
            for i in range(len(head.approx_anchor_generator.base_anchors))
        ]
        cls_scores, bbox_preds, shape_preds, loc_preds = head.forward(feat)

        # Test that empty ground truth encourages the network to predict
        # background
        gt_bboxes = [torch.empty((0, 4)).cuda()]
        gt_labels = [torch.LongTensor([]).cuda()]

        gt_bboxes_ignore = None

        empty_gt_losses = head.loss(cls_scores, bbox_preds, shape_preds,
                                    loc_preds, gt_bboxes, gt_labels,
                                    img_metas, gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert empty_box_loss.item() == 0, (
            'there should be no box loss when there are no true boxes')

        # When truth is non-empty then both cls and box loss should be
        # nonzero for random inputs
        gt_bboxes = [
            torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
        ]
        gt_labels = [torch.LongTensor([2]).cuda()]
        one_gt_losses = head.loss(cls_scores, bbox_preds, shape_preds,
                                  loc_preds, gt_bboxes, gt_labels, img_metas,
                                  gt_bboxes_ignore)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
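Both CUDA-dependent tests above pass vacuously on CPU-only machines, because all of the work sits under `if torch.cuda.is_available():`. A common alternative (sketched here, not used by this suite) is an explicit skip marker, which reports the test as skipped rather than passed:

import pytest
import torch

@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires CUDA')
def test_fsaf_head_loss_gpu():
    ...  # hypothetical: same body as test_fsaf_head_loss, minus the guard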
================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_gfl_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch

from mmdet.models.dense_heads import GFLHead


def test_gfl_head_loss():
    """Tests gfl head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(type='ATSSAssigner', topk=9),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    self = GFLHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        loss_cls=dict(
            type='QualityFocalLoss',
            use_sigmoid=True,
            beta=2.0,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    cls_scores, bbox_preds = self.forward(feat)

    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = sum(empty_gt_losses['loss_cls'])
    empty_box_loss = sum(empty_gt_losses['loss_bbox'])
    empty_dfl_loss = sum(empty_gt_losses['loss_dfl'])
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')
    assert empty_dfl_loss.item() == 0, (
        'there should be no dfl loss when there are no true boxes')

    # When truth is non-empty then both cls and box loss should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                              img_metas, gt_bboxes_ignore)
    onegt_cls_loss = sum(one_gt_losses['loss_cls'])
    onegt_box_loss = sum(one_gt_losses['loss_bbox'])
    onegt_dfl_loss = sum(one_gt_losses['loss_dfl'])
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
    assert onegt_dfl_loss.item() > 0, 'dfl loss should be non-zero'
================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_lad_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch

from mmdet.models.dense_heads import LADHead, lad_head
from mmdet.models.dense_heads.lad_head import levels_to_images


def test_lad_head_loss():
    """Tests lad head loss when truth is empty and non-empty."""

    class mock_skm:

        def GaussianMixture(self, *args, **kwargs):
            return self

        def fit(self, loss):
            pass

        def predict(self, loss):
            # np.long was removed in NumPy 1.24; np.int64 is the equivalent
            components = np.zeros_like(loss, dtype=np.int64)
            return components.reshape(-1)

        def score_samples(self, loss):
            scores = np.random.random(len(loss))
            return scores

    lad_head.skm = mock_skm()

    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.1,
                neg_iou_thr=0.1,
                min_pos_iou=0,
                ignore_iof_thr=-1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    # since Focal Loss is not supported on CPU
    self = LADHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
    teacher_model = LADHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    self.init_weights()
    teacher_model.init_weights()

    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None

    outs_teacher = teacher_model(feat)
    label_assignment_results = teacher_model.get_label_assignment(
        *outs_teacher, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)

    outs = teacher_model(feat)
    empty_gt_losses = self.loss(*outs, gt_bboxes, gt_labels, img_metas,
                                gt_bboxes_ignore, label_assignment_results)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = empty_gt_losses['loss_cls']
    empty_box_loss = empty_gt_losses['loss_bbox']
    empty_iou_loss = empty_gt_losses['loss_iou']
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')
    assert empty_iou_loss.item() == 0, (
        'there should be no iou loss when there are no true boxes')

    # When truth is non-empty then both cls and box loss should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]

    label_assignment_results = teacher_model.get_label_assignment(
        *outs_teacher, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)

    one_gt_losses = self.loss(*outs, gt_bboxes, gt_labels, img_metas,
                              gt_bboxes_ignore, label_assignment_results)
    onegt_cls_loss = one_gt_losses['loss_cls']
    onegt_box_loss = one_gt_losses['loss_bbox']
    onegt_iou_loss = one_gt_losses['loss_iou']
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
    assert onegt_iou_loss.item() > 0, 'iou loss should be non-zero'

    n, c, h, w = 10, 4, 20, 20
    mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
    results = levels_to_images(mlvl_tensor)
    assert len(results) == n
    assert results[0].size() == (h * w * 5, c)
    assert self.with_score_voting

    self = LADHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
    cls_scores = [torch.ones(2, 4, 5, 5)]
    bbox_preds = [torch.ones(2, 4, 5, 5)]
    iou_preds = [torch.ones(2, 1, 5, 5)]
    cfg = mmcv.Config(
        dict(
            nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.6),
            max_per_img=100))
    rescale = False
    self.get_bboxes(
        cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_ld_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch

from mmdet.models.dense_heads import GFLHead, LDHead


def test_ld_head_loss():
    """Tests ld head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(type='ATSSAssigner', topk=9, ignore_iof_thr=0.1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))

    self = LDHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        loss_ld=dict(type='KnowledgeDistillationKLDivLoss', loss_weight=1.0),
        loss_cls=dict(
            type='QualityFocalLoss',
            use_sigmoid=True,
            beta=2.0,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]))

    teacher_model = GFLHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        loss_cls=dict(
            type='QualityFocalLoss',
            use_sigmoid=True,
            beta=2.0,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]))

    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    cls_scores, bbox_preds = self.forward(feat)
    rand_soft_target = teacher_model.forward(feat)[1]

    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None

    empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                rand_soft_target, img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero, ld loss should
    # be non-negative but there should be no box loss.
    empty_cls_loss = sum(empty_gt_losses['loss_cls'])
    empty_box_loss = sum(empty_gt_losses['loss_bbox'])
    empty_ld_loss = sum(empty_gt_losses['loss_ld'])
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')
    assert empty_ld_loss.item() >= 0, 'ld loss should be non-negative'

    # When truth is non-empty then both cls and box loss should be nonzero
    # for random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                              rand_soft_target, img_metas, gt_bboxes_ignore)
    onegt_cls_loss = sum(one_gt_losses['loss_cls'])
    onegt_box_loss = sum(one_gt_losses['loss_bbox'])
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'

    gt_bboxes_ignore = gt_bboxes

    # When truth is non-empty but ignored then the cls loss should be nonzero,
    # but there should be no box loss.
    ignore_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                 rand_soft_target, img_metas,
                                 gt_bboxes_ignore)
    ignore_cls_loss = sum(ignore_gt_losses['loss_cls'])
    ignore_box_loss = sum(ignore_gt_losses['loss_bbox'])
    assert ignore_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert ignore_box_loss.item() == 0, 'gt bbox ignored loss should be zero'

    # When truth is non-empty and not ignored then both cls and box loss
    # should be nonzero for random inputs
    gt_bboxes_ignore = [torch.randn(1, 4)]

    not_ignore_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes,
                                     gt_labels, rand_soft_target, img_metas,
                                     gt_bboxes_ignore)
    not_ignore_cls_loss = sum(not_ignore_gt_losses['loss_cls'])
    not_ignore_box_loss = sum(not_ignore_gt_losses['loss_bbox'])
    assert not_ignore_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert not_ignore_box_loss.item(
    ) > 0, 'gt bbox not ignored loss should be non-zero'


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_mask2former_head.py
================================================
import numpy as np
import pytest
import torch
from mmcv import ConfigDict

from mmdet.core.mask import BitmapMasks
from mmdet.models.dense_heads import Mask2FormerHead


@pytest.mark.parametrize('num_stuff_classes, \
    label_num', [(53, 100), (0, 80)])
def test_mask2former_head_loss(num_stuff_classes, label_num):
    """Tests head loss when truth is empty and non-empty.

    Tests head loss as Panoptic Segmentation and Instance Segmentation.
    Tests forward_train and simple_test with masks and None as
    gt_semantic_seg.
    """
    self = _init_model(num_stuff_classes)
    img_metas = [{
        'batch_input_shape': (128, 160),
        'pad_shape': (128, 160, 3),
        'img_shape': (126, 160, 3),
        'ori_shape': (63, 80, 3)
    }, {
        'batch_input_shape': (128, 160),
        'pad_shape': (128, 160, 3),
        'img_shape': (120, 160, 3),
        'ori_shape': (60, 80, 3)
    }]
    feats = [
        torch.rand((2, 64 * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i)))
        for i in range(4)
    ]
    all_cls_scores, all_mask_preds = self.forward(feats, img_metas)

    # Test that empty ground truth encourages the network to predict
    # background
    gt_labels_list = [torch.LongTensor([]), torch.LongTensor([])]
    gt_masks_list = [
        torch.zeros((0, 128, 160)).long(),
        torch.zeros((0, 128, 160)).long()
    ]
    empty_gt_losses = self.loss(all_cls_scores, all_mask_preds,
                                gt_labels_list, gt_masks_list, img_metas)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no mask loss.
    for key, loss in empty_gt_losses.items():
        if 'cls' in key:
            assert loss.item() > 0, 'cls loss should be non-zero'
        elif 'mask' in key:
            assert loss.item(
            ) == 0, 'there should be no mask loss when there are no true masks'
        elif 'dice' in key:
            assert loss.item(
            ) == 0, 'there should be no dice loss when there are no true masks'

    # when truth is non-empty then both cls, mask, dice loss should be nonzero
    # for random inputs
    gt_labels_list = [
        torch.tensor([10, label_num]).long(),
        torch.tensor([label_num, 10]).long()
    ]
    mask1 = torch.zeros((2, 128, 160)).long()
    mask1[0, :50] = 1
    mask1[1, 50:] = 1
    mask2 = torch.zeros((2, 128, 160)).long()
    mask2[0, :, :50] = 1
    mask2[1, :, 50:] = 1
    gt_masks_list = [mask1, mask2]
    two_gt_losses = self.loss(all_cls_scores, all_mask_preds, gt_labels_list,
                              gt_masks_list, img_metas)
    for loss in two_gt_losses.values():
        assert loss.item() > 0, 'all loss should be non-zero'

    # test forward_train
    gt_bboxes = None
    gt_labels = [
        torch.tensor([10]).long(),
        torch.tensor([10]).long(),
    ]
    thing_mask1 = np.zeros((1, 128, 160), dtype=np.int32)
    thing_mask1[0, :50] = 1
    thing_mask2 = np.zeros((1, 128, 160), dtype=np.int32)
    thing_mask2[0, :, 50:] = 1
    gt_masks = [
        BitmapMasks(thing_mask1, 128, 160),
        BitmapMasks(thing_mask2, 128, 160),
    ]
    stuff_mask1 = torch.zeros((1, 128, 160)).long()
    stuff_mask1[0, :50] = 10
    stuff_mask1[0, 50:] = 100
    stuff_mask2 = torch.zeros((1, 128, 160)).long()
    stuff_mask2[0, :, 50:] = 10
    stuff_mask2[0, :, :50] = 100
    gt_semantic_seg = [stuff_mask1, stuff_mask2]

    self.forward_train(feats, img_metas, gt_bboxes, gt_labels, gt_masks,
                       gt_semantic_seg)

    # test when gt_semantic_seg is None
    gt_semantic_seg = None
    self.forward_train(feats, img_metas, gt_bboxes, gt_labels, gt_masks,
                       gt_semantic_seg)

    # test inference mode
    self.simple_test(feats, img_metas)


def _init_model(num_stuff_classes):
    base_channels = 64
    num_things_classes = 80
    num_classes = num_things_classes + num_stuff_classes
    config = ConfigDict(
        dict(
            type='Mask2FormerHead',
            in_channels=[base_channels * 2**i for i in range(4)],
            feat_channels=base_channels,
            out_channels=base_channels,
            num_things_classes=num_things_classes,
            num_stuff_classes=num_stuff_classes,
            num_queries=100,
            num_transformer_feat_level=3,
            pixel_decoder=dict(
                type='MSDeformAttnPixelDecoder',
                num_outs=3,
                norm_cfg=dict(type='GN', num_groups=32),
                act_cfg=dict(type='ReLU'),
                encoder=dict(
                    type='DetrTransformerEncoder',
                    num_layers=6,
                    transformerlayers=dict(
                        type='BaseTransformerLayer',
                        attn_cfgs=dict(
                            type='MultiScaleDeformableAttention',
                            embed_dims=base_channels,
                            num_heads=8,
                            num_levels=3,
                            num_points=4,
                            im2col_step=64,
                            dropout=0.0,
                            batch_first=False,
                            norm_cfg=None,
                            init_cfg=None),
                        ffn_cfgs=dict(
                            type='FFN',
                            embed_dims=base_channels,
                            feedforward_channels=base_channels * 4,
                            num_fcs=2,
                            ffn_drop=0.0,
                            act_cfg=dict(type='ReLU', inplace=True)),
                        feedforward_channels=base_channels * 4,
                        ffn_dropout=0.0,
                        operation_order=('self_attn', 'norm', 'ffn', 'norm')),
                    init_cfg=None),
                positional_encoding=dict(
                    type='SinePositionalEncoding',
                    num_feats=base_channels // 2,
                    normalize=True),
                init_cfg=None),
            enforce_decoder_input_project=False,
            positional_encoding=dict(
                type='SinePositionalEncoding',
                num_feats=base_channels // 2,
                normalize=True),
            transformer_decoder=dict(
                type='DetrTransformerDecoder',
                return_intermediate=True,
                num_layers=9,
                transformerlayers=dict(
                    type='DetrTransformerDecoderLayer',
                    attn_cfgs=dict(
                        type='MultiheadAttention',
                        embed_dims=base_channels,
                        num_heads=8,
                        attn_drop=0.0,
                        proj_drop=0.0,
                        dropout_layer=None,
                        batch_first=False),
                        feedforward_channels=base_channels * 8,
                        num_fcs=2,
                        act_cfg=dict(type='ReLU', inplace=True),
                        ffn_drop=0.0,
                        dropout_layer=None,
                        add_identity=True),
                    # the following parameter is not used;
                    # it only keeps the current API happy
                    feedforward_channels=base_channels * 8,
                    operation_order=('cross_attn', 'norm', 'self_attn',
                                     'norm', 'ffn', 'norm')),
                init_cfg=None),
            loss_cls=dict(
                type='CrossEntropyLoss',
                use_sigmoid=False,
                loss_weight=2.0,
                reduction='mean',
                class_weight=[1.0] * num_classes + [0.1]),
            loss_mask=dict(
                type='CrossEntropyLoss',
                use_sigmoid=True,
                reduction='mean',
                loss_weight=5.0),
            loss_dice=dict(
                type='DiceLoss',
                use_sigmoid=True,
                activate=True,
                reduction='mean',
                naive_dice=True,
                eps=1.0,
                loss_weight=5.0),
            train_cfg=dict(
                num_points=256,
                oversample_ratio=3.0,
                importance_sample_ratio=0.75,
                assigner=dict(
                    type='MaskHungarianAssigner',
                    cls_cost=dict(type='ClassificationCost', weight=2.0),
                    mask_cost=dict(
                        type='CrossEntropyLossCost',
                        weight=5.0,
                        use_sigmoid=True),
                    dice_cost=dict(
                        type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),
                sampler=dict(type='MaskPseudoSampler')),
            test_cfg=dict(
                panoptic_on=True,
                semantic_on=False,
                instance_on=True,
                max_dets_per_image=100,
                object_mask_thr=0.8,
                iou_thr=0.8)))
    self = Mask2FormerHead(**config)
    self.init_weights()
    return self


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_maskformer_head.py
================================================
import numpy as np
import torch
from mmcv import ConfigDict

from mmdet.core.mask import BitmapMasks
from mmdet.models.dense_heads import MaskFormerHead


def test_maskformer_head_loss():
    """Tests head loss when truth is empty and non-empty."""
    base_channels = 64
    # batch_input_shape = (128, 160)
    img_metas = [{
        'batch_input_shape': (128, 160),
        'pad_shape': (128, 160, 3),
        'img_shape': (126, 160, 3),
        'ori_shape': (63, 80, 3)
    }, {
        'batch_input_shape': (128, 160),
        'pad_shape': (128, 160, 3),
        'img_shape': (120, 160, 3),
        'ori_shape': (60, 80, 3)
    }]
    feats = [
        torch.rand((2, 64 * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i)))
        for i in range(4)
    ]
    num_things_classes = 80
    num_stuff_classes = 53
    num_classes = num_things_classes + num_stuff_classes
    config = ConfigDict(
        dict(
            type='MaskFormerHead',
            in_channels=[base_channels * 2**i for i in range(4)],
            feat_channels=base_channels,
            out_channels=base_channels,
            num_things_classes=num_things_classes,
            num_stuff_classes=num_stuff_classes,
            num_queries=100,
            pixel_decoder=dict(
                type='TransformerEncoderPixelDecoder',
                norm_cfg=dict(type='GN', num_groups=32),
                act_cfg=dict(type='ReLU'),
                encoder=dict(
                    type='DetrTransformerEncoder',
                    num_layers=6,
                    transformerlayers=dict(
                        type='BaseTransformerLayer',
                        attn_cfgs=dict(
                            type='MultiheadAttention',
                            embed_dims=base_channels,
                            num_heads=8,
                            attn_drop=0.1,
                            proj_drop=0.1,
                            dropout_layer=None,
                            batch_first=False),
                        ffn_cfgs=dict(
                            embed_dims=base_channels,
                            feedforward_channels=base_channels * 8,
                            num_fcs=2,
                            act_cfg=dict(type='ReLU', inplace=True),
                            ffn_drop=0.1,
                            dropout_layer=None,
                            add_identity=True),
                        operation_order=('self_attn', 'norm', 'ffn', 'norm'),
                        norm_cfg=dict(type='LN'),
                        init_cfg=None,
                        batch_first=False),
                    init_cfg=None),
                positional_encoding=dict(
                    type='SinePositionalEncoding',
                    num_feats=base_channels // 2,
                    normalize=True)),
            enforce_decoder_input_project=False,
            positional_encoding=dict(
                type='SinePositionalEncoding',
                num_feats=base_channels // 2,
                normalize=True),
            transformer_decoder=dict(
                type='DetrTransformerDecoder',
                return_intermediate=True,
                num_layers=6,
                transformerlayers=dict(
                    type='DetrTransformerDecoderLayer',
                    attn_cfgs=dict(
                        type='MultiheadAttention',
                        embed_dims=base_channels,
                        num_heads=8,
                        attn_drop=0.1,
                        proj_drop=0.1,
                        dropout_layer=None,
                        batch_first=False),
                    ffn_cfgs=dict(
                        embed_dims=base_channels,
                        feedforward_channels=base_channels * 8,
                        num_fcs=2,
                        act_cfg=dict(type='ReLU', inplace=True),
                        ffn_drop=0.1,
                        dropout_layer=None,
                        add_identity=True),
                    # the following parameter is not used;
                    # it only keeps the current API happy
                    feedforward_channels=base_channels * 8,
                    operation_order=('self_attn', 'norm', 'cross_attn',
                                     'norm', 'ffn', 'norm')),
                init_cfg=None),
            loss_cls=dict(
                type='CrossEntropyLoss',
                use_sigmoid=False,
                loss_weight=1.0,
                reduction='mean',
                class_weight=[1.0] * num_classes + [0.1]),
            loss_mask=dict(
                type='FocalLoss',
                use_sigmoid=True,
                gamma=2.0,
                alpha=0.25,
                reduction='mean',
                loss_weight=20.0),
            loss_dice=dict(
                type='DiceLoss',
                use_sigmoid=True,
                activate=True,
                reduction='mean',
                naive_dice=True,
                eps=1.0,
                loss_weight=1.0),
            train_cfg=dict(
                assigner=dict(
                    type='MaskHungarianAssigner',
                    cls_cost=dict(type='ClassificationCost', weight=1.0),
                    mask_cost=dict(
                        type='FocalLossCost', weight=20.0, binary_input=True),
                    dice_cost=dict(
                        type='DiceCost', weight=1.0, pred_act=True, eps=1.0)),
                sampler=dict(type='MaskPseudoSampler')),
            test_cfg=dict(object_mask_thr=0.8, iou_thr=0.8)))
    self = MaskFormerHead(**config)
    self.init_weights()
    all_cls_scores, all_mask_preds = self.forward(feats, img_metas)

    # Test that empty ground truth encourages the network to predict
    # background
    gt_labels_list = [torch.LongTensor([]), torch.LongTensor([])]
    gt_masks_list = [
        torch.zeros((0, 128, 160)).long(),
        torch.zeros((0, 128, 160)).long()
    ]
    empty_gt_losses = self.loss(all_cls_scores, all_mask_preds,
                                gt_labels_list, gt_masks_list, img_metas)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no mask loss.
    for key, loss in empty_gt_losses.items():
        if 'cls' in key:
            assert loss.item() > 0, 'cls loss should be non-zero'
        elif 'mask' in key:
            assert loss.item() == 0, (
                'there should be no mask loss when there are no true masks')
        elif 'dice' in key:
            assert loss.item() == 0, (
                'there should be no dice loss when there are no true masks')

    # when truth is non-empty then both cls, mask, dice loss should be
    # nonzero for random inputs
    gt_labels_list = [
        torch.tensor([10, 100]).long(),
        torch.tensor([100, 10]).long()
    ]
    mask1 = torch.zeros((2, 128, 160)).long()
    mask1[0, :50] = 1
    mask1[1, 50:] = 1
    mask2 = torch.zeros((2, 128, 160)).long()
    mask2[0, :, :50] = 1
    mask2[1, :, 50:] = 1
    gt_masks_list = [mask1, mask2]
    two_gt_losses = self.loss(all_cls_scores, all_mask_preds, gt_labels_list,
                              gt_masks_list, img_metas)
    for loss in two_gt_losses.values():
        assert loss.item() > 0, 'all loss should be non-zero'

    # test forward_train
    gt_bboxes = None
    gt_labels = [
        torch.tensor([10]).long(),
        torch.tensor([10]).long(),
    ]
    thing_mask1 = np.zeros((1, 128, 160), dtype=np.int32)
    thing_mask1[0, :50] = 1
    thing_mask2 = np.zeros((1, 128, 160), dtype=np.int32)
    thing_mask2[0, :, 50:] = 1
    gt_masks = [
        BitmapMasks(thing_mask1, 128, 160),
        BitmapMasks(thing_mask2, 128, 160),
    ]
    stuff_mask1 = torch.zeros((1, 128, 160)).long()
    stuff_mask1[0, :50] = 10
    stuff_mask1[0, 50:] = 100
    stuff_mask2 = torch.zeros((1, 128, 160)).long()
    stuff_mask2[0, :, 50:] = 10
    stuff_mask2[0, :, :50] = 100
    gt_semantic_seg = [stuff_mask1, stuff_mask2]
    self.forward_train(feats, img_metas, gt_bboxes, gt_labels, gt_masks,
                       gt_semantic_seg)

    # test inference mode
    self.simple_test(feats, img_metas)


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_paa_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
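# Editor's note: the test below monkey-patches `paa_head.skm` with a stub so
# that PAA's GMM-based positive/negative splitting can run without
# scikit-learn. A minimal, self-contained sketch of the same stubbing idea;
# the name `_FakeGMM` is hypothetical and not part of mmdet:


def _sketch_fake_gmm():
    """Hedged sketch: fake only the three GaussianMixture methods PAA calls
    (fit, predict, score_samples)."""
    import numpy as np

    class _FakeGMM:

        def GaussianMixture(self, *args, **kwargs):
            # the stub doubles as both the module and the fitted model
            return self

        def fit(self, loss):
            pass  # no real fitting is needed for a smoke test

        def predict(self, loss):
            # assign every sample to component 0
            return np.zeros(len(loss), dtype=np.int64)

        def score_samples(self, loss):
            # arbitrary densities are good enough for a smoke test
            return np.random.random(len(loss))

    return _FakeGMM()
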
import mmcv
import numpy as np
import torch

from mmdet.models.dense_heads import PAAHead, paa_head
from mmdet.models.dense_heads.paa_head import levels_to_images


def test_paa_head_loss():
    """Tests paa head loss when truth is empty and non-empty."""

    class mock_skm:

        def GaussianMixture(self, *args, **kwargs):
            return self

        def fit(self, loss):
            pass

        def predict(self, loss):
            # np.long was removed in NumPy 1.24; np.int64 is equivalent
            components = np.zeros_like(loss, dtype=np.int64)
            return components.reshape(-1)

        def score_samples(self, loss):
            scores = np.random.random(len(loss))
            return scores

    paa_head.skm = mock_skm()

    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.1,
                neg_iou_thr=0.1,
                min_pos_iou=0,
                ignore_iof_thr=-1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    # use CrossEntropyLoss, since Focal Loss is not supported on CPU
    self = PAAHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    self.init_weights()
    cls_scores, bbox_preds, iou_preds = self(feat)

    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
                                gt_labels, img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = empty_gt_losses['loss_cls']
    empty_box_loss = empty_gt_losses['loss_bbox']
    empty_iou_loss = empty_gt_losses['loss_iou']
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')
    assert empty_iou_loss.item() == 0, (
        'there should be no iou loss when there are no true boxes')

    # When truth is non-empty then both cls and box loss should be nonzero
    # for random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
                              gt_labels, img_metas, gt_bboxes_ignore)
    onegt_cls_loss = one_gt_losses['loss_cls']
    onegt_box_loss = one_gt_losses['loss_bbox']
    onegt_iou_loss = one_gt_losses['loss_iou']
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
    assert onegt_iou_loss.item() > 0, 'iou loss should be non-zero'

    n, c, h, w = 10, 4, 20, 20
    mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
    results = levels_to_images(mlvl_tensor)
    assert len(results) == n
    assert results[0].size() == (h * w * 5, c)
    assert self.with_score_voting

    self = PAAHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
    cls_scores = [torch.ones(2, 4, 5, 5)]
    bbox_preds = [torch.ones(2, 4, 5, 5)]
    iou_preds = [torch.ones(2, 1, 5, 5)]
    cfg = mmcv.Config(
        dict(
            nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.6),
            max_per_img=100))
    rescale = False
    self.get_bboxes(
        cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_pisa_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
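# Editor's note: test_paa_head.py above also exercises levels_to_images,
# which turns per-level maps [(N, C, H, W), ...] into one
# (H*W*num_levels, C) tensor per image. A minimal re-implementation sketch
# (not the mmdet code) that reproduces the shapes asserted in that test:


def _sketch_levels_to_images(mlvl_tensor):
    import torch

    batch_size = mlvl_tensor[0].size(0)
    channels = mlvl_tensor[0].size(1)
    # (N, C, H, W) -> (N, H*W, C) per level, then concatenate over levels
    per_level = [
        t.permute(0, 2, 3, 1).reshape(batch_size, -1, channels)
        for t in mlvl_tensor
    ]
    stacked = torch.cat(per_level, dim=1)
    return [stacked[i] for i in range(batch_size)]
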
import mmcv
import torch

from mmdet.models.dense_heads import PISARetinaHead, PISASSDHead
from mmdet.models.roi_heads import PISARoIHead


def test_pisa_retinanet_head_loss():
    """Tests pisa retinanet head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]

    cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            isr=dict(k=2., bias=0.),
            carl=dict(k=1., bias=0.2),
            allowed_border=0,
            pos_weight=-1,
            debug=False))
    self = PISARetinaHead(num_classes=4, in_channels=1, train_cfg=cfg)

    # Anchor head expects multiple levels of features per image
    feat = [
        torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
        for i in range(len(self.anchor_generator.strides))
    ]
    cls_scores, bbox_preds = self.forward(feat)

    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = empty_gt_losses['loss_cls'].sum()
    empty_box_loss = empty_gt_losses['loss_bbox'].sum()
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')

    # When truth is non-empty then both cls and box loss should be nonzero
    # for random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                              img_metas, gt_bboxes_ignore)
    onegt_cls_loss = one_gt_losses['loss_cls'].sum()
    onegt_box_loss = one_gt_losses['loss_bbox'].sum()
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'


def test_pisa_ssd_head_loss():
    """Tests pisa ssd head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]

    cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.,
                ignore_iof_thr=-1,
                gt_max_assign_all=False),
            isr=dict(k=2., bias=0.),
            carl=dict(k=1., bias=0.2),
            smoothl1_beta=1.,
            allowed_border=-1,
            pos_weight=-1,
            neg_pos_ratio=3,
            debug=False))
    ssd_anchor_generator = dict(
        type='SSDAnchorGenerator',
        scale_major=False,
        input_size=300,
        strides=[1],
        ratios=([2], ),
        basesize_ratio_range=(0.15, 0.9))
    self = PISASSDHead(
        num_classes=4,
        in_channels=(1, ),
        train_cfg=cfg,
        anchor_generator=ssd_anchor_generator)

    # Anchor head expects multiple levels of features per image
    feat = [
        torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
        for i in range(len(self.anchor_generator.strides))
    ]
    cls_scores, bbox_preds = self.forward(feat)

    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                img_metas, gt_bboxes_ignore)
    # When there is no truth there should be no box loss; the cls loss
    # behaves differently for SSD, see below.
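    # Editor's sketch (hedged): with hard-negative mining at neg:pos = 3:1,
    # the number of sampled negatives is tied to the number of positives, so
    # an empty ground truth (zero positives) samples zero negatives too:
    _neg_pos_ratio, _num_pos = 3, 0
    assert _neg_pos_ratio * _num_pos == 0  # no negatives -> cls loss is 0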
    empty_cls_loss = sum(empty_gt_losses['loss_cls'])
    empty_box_loss = sum(empty_gt_losses['loss_bbox'])
    # SSD is special: with OHEM at #pos:#neg = 1:3, an empty gt also leads to
    # loss_cls = 0
    assert empty_cls_loss.item() == 0, (
        'there should be no cls loss when there are no true boxes')
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')

    # When truth is non-empty then both cls and box loss should be nonzero
    # for random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                              img_metas, gt_bboxes_ignore)
    onegt_cls_loss = sum(one_gt_losses['loss_cls'])
    onegt_box_loss = sum(one_gt_losses['loss_bbox'])
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'


def test_pisa_roi_head_loss():
    """Tests pisa roi head loss when truth is empty and non-empty."""
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='ScoreHLRSampler',
                num=4,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True,
                k=0.5,
                bias=0.),
            isr=dict(k=2., bias=0.),
            carl=dict(k=1., bias=0.2),
            allowed_border=0,
            pos_weight=-1,
            debug=False))

    bbox_roi_extractor = dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
        out_channels=1,
        featmap_strides=[1])

    bbox_head = dict(
        type='Shared2FCBBoxHead',
        in_channels=1,
        fc_out_channels=2,
        roi_feat_size=7,
        num_classes=4,
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[0., 0., 0., 0.],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        reg_class_agnostic=False,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0))

    self = PISARoIHead(bbox_roi_extractor, bbox_head, train_cfg=train_cfg)

    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]

    # The RoI head expects multiple levels of features per image
    feat = [
        torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
        for i in range(1)
    ]

    proposal_list = [
        torch.Tensor([[22.6667, 22.8757, 238.6326, 151.8874], [0, 3, 5, 7]])
    ]

    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.forward_train(feat, img_metas, proposal_list,
                                         gt_bboxes, gt_labels,
                                         gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = empty_gt_losses['loss_cls'].sum()
    empty_box_loss = empty_gt_losses['loss_bbox'].sum()
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')

    # When truth is non-empty then both cls and box loss should be nonzero
    # for random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.forward_train(feat, img_metas, proposal_list,
                                       gt_bboxes, gt_labels,
                                       gt_bboxes_ignore)
    onegt_cls_loss = one_gt_losses['loss_cls'].sum()
    onegt_box_loss = one_gt_losses['loss_bbox'].sum()
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_sabl_retina_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch

from mmdet.models.dense_heads import SABLRetinaHead


def test_sabl_retina_head_loss():
    """Tests anchor head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]

    cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='ApproxMaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0.0,
                ignore_iof_thr=-1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    head = SABLRetinaHead(
        num_classes=4,
        in_channels=3,
        feat_channels=10,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        train_cfg=cfg)
    if torch.cuda.is_available():
        head.cuda()
        # Anchor head expects multiple levels of features per image
        feat = [
            torch.rand(1, 3, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
            for i in range(len(head.approx_anchor_generator.base_anchors))
        ]
        cls_scores, bbox_preds = head.forward(feat)

        # Test that empty ground truth encourages the network
        # to predict background
        gt_bboxes = [torch.empty((0, 4)).cuda()]
        gt_labels = [torch.LongTensor([]).cuda()]
        gt_bboxes_ignore = None
        empty_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes,
                                    gt_labels, img_metas, gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_cls_loss = sum(empty_gt_losses['loss_bbox_cls'])
        empty_box_reg_loss = sum(empty_gt_losses['loss_bbox_reg'])
        assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert empty_box_cls_loss.item() == 0, (
            'there should be no box cls loss when there are no true boxes')
        assert empty_box_reg_loss.item() == 0, (
            'there should be no box reg loss when there are no true boxes')

        # When truth is non-empty then both cls and box loss should
        # be nonzero for random inputs
        gt_bboxes = [
            torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
        ]
        gt_labels = [torch.LongTensor([2]).cuda()]
        one_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes,
                                  gt_labels, img_metas, gt_bboxes_ignore)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_cls_loss = sum(one_gt_losses['loss_bbox_cls'])
        onegt_box_reg_loss = sum(one_gt_losses['loss_bbox_reg'])
        assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert onegt_box_cls_loss.item() > 0, (
            'box cls loss should be non-zero')
        assert onegt_box_reg_loss.item() > 0, (
            'box reg loss should be non-zero')


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_solo_head.py
================================================
import pytest
import torch

from mmdet.models.dense_heads import (DecoupledSOLOHead,
                                      DecoupledSOLOLightHead, SOLOHead)


def test_solo_head_loss():
    """Tests solo head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    self = SOLOHead(
        num_classes=4,
        in_channels=1,
        num_grids=[40, 36, 24, 16, 12],
        loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0))
    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    mask_preds, cls_preds = self.forward(feat)

    # Test that empty ground truth encourages the network to
    # predict background.
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_masks = [torch.empty((0, 550, 550))]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(
        mask_preds,
        cls_preds,
        gt_labels,
        gt_masks,
        img_metas,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no mask loss.
    empty_mask_loss = empty_gt_losses['loss_mask']
    empty_cls_loss = empty_gt_losses['loss_cls']
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_mask_loss.item() == 0, (
        'there should be no mask loss when there are no true masks')

    # When truth is non-empty then both cls and mask loss should be nonzero
    # for random inputs.
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    gt_masks = [(torch.rand((1, 256, 256)) > 0.5).float()]
    one_gt_losses = self.loss(
        mask_preds,
        cls_preds,
        gt_labels,
        gt_masks,
        img_metas,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore)
    onegt_mask_loss = one_gt_losses['loss_mask']
    onegt_cls_loss = one_gt_losses['loss_cls']
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'

    # When the lengths of num_grids, scale_ranges, and num_levels are not
    # equal.
    with pytest.raises(AssertionError):
        SOLOHead(
            num_classes=4,
            in_channels=1,
            num_grids=[36, 24, 16, 12],
            loss_mask=dict(
                type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
            loss_cls=dict(
                type='FocalLoss',
                use_sigmoid=True,
                gamma=2.0,
                alpha=0.25,
                loss_weight=1.0))

    # When input feature length is not equal to num_levels.
    with pytest.raises(AssertionError):
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16, 32]
        ]
        self.forward(feat)


def test_desolo_head_loss():
    """Tests decoupled solo head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    self = DecoupledSOLOHead(
        num_classes=4,
        in_channels=1,
        num_grids=[40, 36, 24, 16, 12],
        loss_mask=dict(
            type='DiceLoss', use_sigmoid=True, activate=False,
            loss_weight=3.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0))
    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    mask_preds_x, mask_preds_y, cls_preds = self.forward(feat)

    # Test that empty ground truth encourages the network to
    # predict background.
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_masks = [torch.empty((0, 550, 550))]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(
        mask_preds_x,
        mask_preds_y,
        cls_preds,
        gt_labels,
        gt_masks,
        img_metas,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no mask loss.
    empty_mask_loss = empty_gt_losses['loss_mask']
    empty_cls_loss = empty_gt_losses['loss_cls']
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_mask_loss.item() == 0, (
        'there should be no mask loss when there are no true masks')

    # When truth is non-empty then both cls and mask loss should be nonzero
    # for random inputs.
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    gt_masks = [(torch.rand((1, 256, 256)) > 0.5).float()]
    one_gt_losses = self.loss(
        mask_preds_x,
        mask_preds_y,
        cls_preds,
        gt_labels,
        gt_masks,
        img_metas,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore)
    onegt_mask_loss = one_gt_losses['loss_mask']
    onegt_cls_loss = one_gt_losses['loss_cls']
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'

    # When the lengths of num_grids, scale_ranges, and num_levels are not
    # equal.
    with pytest.raises(AssertionError):
        DecoupledSOLOHead(
            num_classes=4,
            in_channels=1,
            num_grids=[36, 24, 16, 12],
            loss_mask=dict(
                type='DiceLoss',
                use_sigmoid=True,
                activate=False,
                loss_weight=3.0),
            loss_cls=dict(
                type='FocalLoss',
                use_sigmoid=True,
                gamma=2.0,
                alpha=0.25,
                loss_weight=1.0))

    # When input feature length is not equal to num_levels.
    with pytest.raises(AssertionError):
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16, 32]
        ]
        self.forward(feat)


def test_desolo_light_head_loss():
    """Tests decoupled solo light head loss when truth is empty and
    non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    self = DecoupledSOLOLightHead(
        num_classes=4,
        in_channels=1,
        num_grids=[40, 36, 24, 16, 12],
        loss_mask=dict(
            type='DiceLoss', use_sigmoid=True, activate=False,
            loss_weight=3.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0))
    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    mask_preds_x, mask_preds_y, cls_preds = self.forward(feat)

    # Test that empty ground truth encourages the network to
    # predict background.
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_masks = [torch.empty((0, 550, 550))]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(
        mask_preds_x,
        mask_preds_y,
        cls_preds,
        gt_labels,
        gt_masks,
        img_metas,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no mask loss.
    empty_mask_loss = empty_gt_losses['loss_mask']
    empty_cls_loss = empty_gt_losses['loss_cls']
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_mask_loss.item() == 0, (
        'there should be no mask loss when there are no true masks')

    # When truth is non-empty then both cls and mask loss should be nonzero
    # for random inputs.
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    gt_masks = [(torch.rand((1, 256, 256)) > 0.5).float()]
    one_gt_losses = self.loss(
        mask_preds_x,
        mask_preds_y,
        cls_preds,
        gt_labels,
        gt_masks,
        img_metas,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore)
    onegt_mask_loss = one_gt_losses['loss_mask']
    onegt_cls_loss = one_gt_losses['loss_cls']
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'

    # When the lengths of num_grids, scale_ranges, and num_levels are not
    # equal.
    with pytest.raises(AssertionError):
        DecoupledSOLOLightHead(
            num_classes=4,
            in_channels=1,
            num_grids=[36, 24, 16, 12],
            loss_mask=dict(
                type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
            loss_cls=dict(
                type='FocalLoss',
                use_sigmoid=True,
                gamma=2.0,
                alpha=0.25,
                loss_weight=1.0))

    # When input feature length is not equal to num_levels.
    with pytest.raises(AssertionError):
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16, 32]
        ]
        self.forward(feat)


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_tood_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
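# Editor's note: the TOOD config below sets `activated=True` on its focal
# losses, i.e. the head hands probabilities rather than raw logits to the
# loss. A minimal sketch of what that flag changes, using plain binary
# cross-entropy as a stand-in (the helper name is illustrative only):


def _sketch_activated_flag():
    import torch
    import torch.nn.functional as F

    pred_logits = torch.randn(4)
    target = torch.rand(4)
    # activated=False style: the loss applies the sigmoid internally
    logit_style = F.binary_cross_entropy_with_logits(pred_logits, target)
    # activated=True style: the caller activates first
    prob_style = F.binary_cross_entropy(torch.sigmoid(pred_logits), target)
    assert torch.allclose(logit_style, prob_style, atol=1e-6)
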
import mmcv
import torch

from mmdet.models.dense_heads import TOODHead


def test_tood_head_loss():
    """Tests tood head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            initial_epoch=4,
            initial_assigner=dict(type='ATSSAssigner', topk=9),
            assigner=dict(type='TaskAlignedAssigner', topk=13),
            alpha=1,
            beta=6,
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    test_cfg = mmcv.Config(
        dict(
            nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.6),
            max_per_img=100))
    # since Focal Loss is not supported on CPU
    self = TOODHead(
        num_classes=80,
        in_channels=1,
        stacked_convs=6,
        feat_channels=256,
        anchor_type='anchor_free',
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        initial_loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            activated=True,  # use probability instead of logit as input
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_cls=dict(
            type='QualityFocalLoss',
            use_sigmoid=True,
            activated=True,  # use probability instead of logit as input
            beta=2.0,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        train_cfg=train_cfg,
        test_cfg=test_cfg)
    self.init_weights()

    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [8, 16, 32, 64, 128]
    ]
    cls_scores, bbox_preds = self(feat)

    # test initial assigner and losses
    self.epoch = 0
    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
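    # Editor's note: TOODHead returns these losses as per-level lists, which
    # is why the checks below sum(...) before calling .item().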
    empty_cls_loss = empty_gt_losses['loss_cls']
    empty_box_loss = empty_gt_losses['loss_bbox']
    assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
    assert sum(empty_box_loss).item() == 0, (
        'there should be no box loss when there are no true boxes')

    # When truth is non-empty then both cls and box loss should be nonzero
    # for random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                              img_metas, gt_bboxes_ignore)
    onegt_cls_loss = one_gt_losses['loss_cls']
    onegt_box_loss = one_gt_losses['loss_bbox']
    assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
    assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'

    # test task alignment assigner and losses
    self.epoch = 10
    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = empty_gt_losses['loss_cls']
    empty_box_loss = empty_gt_losses['loss_bbox']
    assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
    assert sum(empty_box_loss).item() == 0, (
        'there should be no box loss when there are no true boxes')

    # When truth is non-empty then both cls and box loss should be nonzero
    # for random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                              img_metas, gt_bboxes_ignore)
    onegt_cls_loss = one_gt_losses['loss_cls']
    onegt_box_loss = one_gt_losses['loss_bbox']
    assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
    assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_vfnet_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch

from mmdet.models.dense_heads import VFNetHead


def test_vfnet_head_loss():
    """Tests vfnet head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(type='ATSSAssigner', topk=9),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    # since Focal Loss is not supported on CPU
    self = VFNetHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        loss_cls=dict(
            type='VarifocalLoss', use_sigmoid=True, loss_weight=1.0))
    if torch.cuda.is_available():
        self.cuda()
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size).cuda()
            for feat_size in [4, 8, 16, 32, 64]
        ]
        cls_scores, bbox_preds, bbox_preds_refine = self.forward(feat)

        # Test that empty ground truth encourages the network to predict
        # background
        gt_bboxes = [torch.empty((0, 4)).cuda()]
        gt_labels = [torch.LongTensor([]).cuda()]
        gt_bboxes_ignore = None
        empty_gt_losses = self.loss(cls_scores, bbox_preds,
                                    bbox_preds_refine, gt_bboxes, gt_labels,
                                    img_metas, gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert empty_box_loss.item() == 0, (
            'there should be no box loss when there are no true boxes')

        # When truth is non-empty then both cls and box loss should be
        # nonzero for random inputs
        gt_bboxes = [
            torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
        ]
        gt_labels = [torch.LongTensor([2]).cuda()]
        one_gt_losses = self.loss(cls_scores, bbox_preds, bbox_preds_refine,
                                  gt_bboxes, gt_labels, img_metas,
                                  gt_bboxes_ignore)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_box_loss = one_gt_losses['loss_bbox']
        assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert onegt_box_loss.item() > 0, 'box loss should be non-zero'


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_yolact_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
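# Editor's note: test_vfnet_head.py above moves the head and tensors to CUDA
# only when a GPU is present, since the focal-style loss kernels it relies on
# are, as the comment there says, not supported on CPU. A minimal sketch of
# that guard (the helper name is hypothetical):


def _sketch_maybe_cuda(module, tensors):
    import torch

    if torch.cuda.is_available():
        module = module.cuda()
        tensors = [t.cuda() for t in tensors]
    return module, tensors
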
import mmcv
import torch

from mmdet.models.dense_heads import (YOLACTHead, YOLACTProtonet,
                                      YOLACTSegmHead)


def test_yolact_head_loss():
    """Tests yolact head losses when truth is empty and non-empty."""
    s = 550
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0.,
                ignore_iof_thr=-1,
                gt_max_assign_all=False),
            smoothl1_beta=1.,
            allowed_border=-1,
            pos_weight=-1,
            neg_pos_ratio=3,
            debug=False,
            min_gt_box_wh=[4.0, 4.0]))
    bbox_head = YOLACTHead(
        num_classes=80,
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=3,
            scales_per_octave=1,
            base_sizes=[8, 16, 32, 64, 128],
            ratios=[0.5, 1.0, 2.0],
            strides=[550.0 / x for x in [69, 35, 18, 9, 5]],
            centers=[(550 * 0.5 / x, 550 * 0.5 / x)
                     for x in [69, 35, 18, 9, 5]]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='CrossEntropyLoss',
            use_sigmoid=False,
            reduction='none',
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5),
        num_head_convs=1,
        num_protos=32,
        use_ohem=True,
        train_cfg=train_cfg)
    segm_head = YOLACTSegmHead(
        in_channels=256,
        num_classes=80,
        loss_segm=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
    mask_head = YOLACTProtonet(
        num_classes=80,
        in_channels=256,
        num_protos=32,
        max_masks_to_train=100,
        loss_mask_weight=6.125)
    feat = [
        torch.rand(1, 256, feat_size, feat_size)
        for feat_size in [69, 35, 18, 9, 5]
    ]
    cls_score, bbox_pred, coeff_pred = bbox_head.forward(feat)

    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_masks = [torch.empty((0, 550, 550))]
    gt_bboxes_ignore = None
    empty_gt_losses, sampling_results = bbox_head.loss(
        cls_score,
        bbox_pred,
        gt_bboxes,
        gt_labels,
        img_metas,
        gt_bboxes_ignore=gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = sum(empty_gt_losses['loss_cls'])
    empty_box_loss = sum(empty_gt_losses['loss_bbox'])
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')

    # Test segm head and mask head
    segm_head_outs = segm_head(feat[0])
    empty_segm_loss = segm_head.loss(segm_head_outs, gt_masks, gt_labels)
    mask_pred = mask_head(feat[0], coeff_pred, gt_bboxes, img_metas,
                          sampling_results)
    empty_mask_loss = mask_head.loss(mask_pred, gt_masks, gt_bboxes,
                                     img_metas, sampling_results)
    # When there is no truth, the segm and mask loss should be zero.
    empty_segm_loss = sum(empty_segm_loss['loss_segm'])
    empty_mask_loss = sum(empty_mask_loss['loss_mask'])
    assert empty_segm_loss.item() == 0, (
        'there should be no segm loss when there are no true boxes')
    assert empty_mask_loss == 0, (
        'there should be no mask loss when there are no true boxes')

    # When truth is non-empty then cls, box, mask, segm loss should be
    # nonzero for random inputs.
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    gt_masks = [(torch.rand((1, 550, 550)) > 0.5).float()]
    one_gt_losses, sampling_results = bbox_head.loss(
        cls_score,
        bbox_pred,
        gt_bboxes,
        gt_labels,
        img_metas,
        gt_bboxes_ignore=gt_bboxes_ignore)
    one_gt_cls_loss = sum(one_gt_losses['loss_cls'])
    one_gt_box_loss = sum(one_gt_losses['loss_bbox'])
    assert one_gt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert one_gt_box_loss.item() > 0, 'box loss should be non-zero'

    one_gt_segm_loss = segm_head.loss(segm_head_outs, gt_masks, gt_labels)
    mask_pred = mask_head(feat[0], coeff_pred, gt_bboxes, img_metas,
                          sampling_results)
    one_gt_mask_loss = mask_head.loss(mask_pred, gt_masks, gt_bboxes,
                                      img_metas, sampling_results)
    one_gt_segm_loss = sum(one_gt_segm_loss['loss_segm'])
    one_gt_mask_loss = sum(one_gt_mask_loss['loss_mask'])
    assert one_gt_segm_loss.item() > 0, 'segm loss should be non-zero'
    assert one_gt_mask_loss.item() > 0, 'mask loss should be non-zero'


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_yolof_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch

from mmdet.models.dense_heads import YOLOFHead


def test_yolof_head_loss():
    """Tests yolof head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='UniformAssigner',
                pos_ignore_thr=0.15,
                neg_ignore_thr=0.7),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    self = YOLOFHead(
        num_classes=4,
        in_channels=1,
        reg_decoded_bbox=True,
        train_cfg=train_cfg,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[1, 2, 4, 8, 16],
            strides=[32]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1., 1., 1., 1.],
            add_ctr_clamp=True,
            ctr_clamp=32),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.0))
    feat = [torch.rand(1, 1, s // 32, s // 32)]
    cls_scores, bbox_preds = self.forward(feat)

    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
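    # Editor's note: YOLOF predicts from a single C5-level feature map
    # (strides=[32] above), so these losses are scalars rather than
    # per-level lists.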
    empty_cls_loss = empty_gt_losses['loss_cls']
    empty_box_loss = empty_gt_losses['loss_bbox']
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')

    # When truth is non-empty then both cls and box loss should be nonzero
    # for random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                              img_metas, gt_bboxes_ignore)
    onegt_cls_loss = one_gt_losses['loss_cls']
    onegt_box_loss = one_gt_losses['loss_bbox']
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_dense_heads/test_yolox_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule

from mmdet.models.dense_heads import YOLOXHead


def test_yolox_head_loss():
    """Tests yolox head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='SimOTAAssigner',
                center_radius=2.5,
                candidate_topk=10,
                iou_weight=3.0,
                cls_weight=1.0)))
    self = YOLOXHead(
        num_classes=4, in_channels=1, use_depthwise=False,
        train_cfg=train_cfg)
    assert not self.use_l1
    assert isinstance(self.multi_level_cls_convs[0][0], ConvModule)

    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16]
    ]
    cls_scores, bbox_preds, objectnesses = self.forward(feat)

    # Test that empty ground truth encourages the network to predict
    # background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
                                gt_bboxes, gt_labels, img_metas)
    # With SimOTA and no truth nothing gets assigned, so cls and box loss are
    # zero and only the objectness loss is nonzero (see the checks below).
    empty_cls_loss = empty_gt_losses['loss_cls'].sum()
    empty_box_loss = empty_gt_losses['loss_bbox'].sum()
    empty_obj_loss = empty_gt_losses['loss_obj'].sum()
    assert empty_cls_loss.item() == 0, (
        'there should be no cls loss when there are no true boxes')
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')
    assert empty_obj_loss.item() > 0, 'objectness loss should be non-zero'

    # When truth is non-empty then both cls and box loss should be nonzero
    # for random inputs
    self = YOLOXHead(
        num_classes=4, in_channels=1, use_depthwise=True,
        train_cfg=train_cfg)
    assert isinstance(self.multi_level_cls_convs[0][0],
                      DepthwiseSeparableConvModule)
    self.use_l1 = True
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses, gt_bboxes,
                              gt_labels, img_metas)
    onegt_cls_loss = one_gt_losses['loss_cls'].sum()
    onegt_box_loss = one_gt_losses['loss_bbox'].sum()
    onegt_obj_loss = one_gt_losses['loss_obj'].sum()
    onegt_l1_loss = one_gt_losses['loss_l1'].sum()
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
    assert onegt_obj_loss.item() > 0, 'obj loss should be non-zero'
    assert onegt_l1_loss.item() > 0, 'l1 loss should be non-zero'

    # Test ground truth out of bound
    gt_bboxes = [torch.Tensor([[s * 4, s * 4, s * 4 + 10, s * 4 + 10]])]
    gt_labels = [torch.LongTensor([2])]
    empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
                                gt_bboxes, gt_labels, img_metas)
    # When gt_bboxes are out of bound, the assign results should be empty,
    # so the cls and bbox loss should be zero.
    empty_cls_loss = empty_gt_losses['loss_cls'].sum()
    empty_box_loss = empty_gt_losses['loss_bbox'].sum()
    empty_obj_loss = empty_gt_losses['loss_obj'].sum()
    assert empty_cls_loss.item() == 0, (
        'there should be no cls loss when gt_bboxes are out of bound')
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when gt_bboxes are out of bound')
    assert empty_obj_loss.item() > 0, 'objectness loss should be non-zero'


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_forward.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
"""pytest tests/test_forward.py."""
import copy
from os.path import dirname, exists, join

import numpy as np
import pytest
import torch


def _get_config_directory():
    """Find the predefined detector config directory."""
    try:
        # Assume we are running in the source mmdetection repo
        repo_dpath = dirname(dirname(dirname(__file__)))
    except NameError:
        # For IPython development when this __file__ is not defined
        import mmdet
        repo_dpath = dirname(dirname(mmdet.__file__))
    config_dpath = join(repo_dpath, 'configs')
    if not exists(config_dpath):
        raise Exception('Cannot find config path')
    return config_dpath


def _get_config_module(fname):
    """Load a configuration as a python module."""
    from mmcv import Config
    config_dpath = _get_config_directory()
    config_fpath = join(config_dpath, fname)
    config_mod = Config.fromfile(config_fpath)
    return config_mod


def _get_detector_cfg(fname):
    """Grab configs necessary to create a detector.

    These are deep copied to allow for safe modification of parameters
    without influencing other tests.
""" config = _get_config_module(fname) model = copy.deepcopy(config.model) return model def _replace_r50_with_r18(model): """Replace ResNet50 with ResNet18 in config.""" model = copy.deepcopy(model) if model.backbone.type == 'ResNet': model.backbone.depth = 18 model.backbone.base_channels = 2 model.neck.in_channels = [2, 4, 8, 16] return model def test_sparse_rcnn_forward(): config_path = 'sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py' model = _get_detector_cfg(config_path) model = _replace_r50_with_r18(model) model.backbone.init_cfg = None from mmdet.models import build_detector detector = build_detector(model) detector.init_weights() input_shape = (1, 3, 100, 100) mm_inputs = _demo_mm_inputs(input_shape, num_items=[5]) imgs = mm_inputs.pop('imgs') img_metas = mm_inputs.pop('img_metas') # Test forward train with non-empty truth batch detector.train() gt_bboxes = mm_inputs['gt_bboxes'] gt_bboxes = [item for item in gt_bboxes] gt_labels = mm_inputs['gt_labels'] gt_labels = [item for item in gt_labels] losses = detector.forward( imgs, img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, return_loss=True) assert isinstance(losses, dict) loss, _ = detector._parse_losses(losses) assert float(loss.item()) > 0 detector.forward_dummy(imgs) # Test forward train with an empty truth batch mm_inputs = _demo_mm_inputs(input_shape, num_items=[0]) imgs = mm_inputs.pop('imgs') img_metas = mm_inputs.pop('img_metas') gt_bboxes = mm_inputs['gt_bboxes'] gt_bboxes = [item for item in gt_bboxes] gt_labels = mm_inputs['gt_labels'] gt_labels = [item for item in gt_labels] losses = detector.forward( imgs, img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, return_loss=True) assert isinstance(losses, dict) loss, _ = detector._parse_losses(losses) assert float(loss.item()) > 0 # Test forward test detector.eval() with torch.no_grad(): img_list = [g[None, :] for g in imgs] batch_results = [] for one_img, one_meta in zip(img_list, img_metas): result = detector.forward([one_img], [[one_meta]], rescale=True, return_loss=False) batch_results.append(result) # test empty proposal in roi_head with torch.no_grad(): # test no proposal in the whole batch detector.roi_head.simple_test([imgs[0][None, :]], torch.empty( (1, 0, 4)), torch.empty((1, 100, 4)), [img_metas[0]], torch.ones((1, 4))) def test_rpn_forward(): model = _get_detector_cfg('rpn/rpn_r50_fpn_1x_coco.py') model = _replace_r50_with_r18(model) model.backbone.init_cfg = None from mmdet.models import build_detector detector = build_detector(model) input_shape = (1, 3, 100, 100) mm_inputs = _demo_mm_inputs(input_shape) imgs = mm_inputs.pop('imgs') img_metas = mm_inputs.pop('img_metas') # Test forward train gt_bboxes = mm_inputs['gt_bboxes'] losses = detector.forward( imgs, img_metas, gt_bboxes=gt_bboxes, return_loss=True) assert isinstance(losses, dict) # Test forward test with torch.no_grad(): img_list = [g[None, :] for g in imgs] batch_results = [] for one_img, one_meta in zip(img_list, img_metas): result = detector.forward([one_img], [[one_meta]], return_loss=False) batch_results.append(result) @pytest.mark.parametrize( 'cfg_file', [ 'reppoints/reppoints_moment_r50_fpn_1x_coco.py', 'retinanet/retinanet_r50_fpn_1x_coco.py', 'guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py', 'ghm/retinanet_ghm_r50_fpn_1x_coco.py', 'fcos/fcos_center_r50_caffe_fpn_gn-head_1x_coco.py', 'foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py', # 'free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py', # 'atss/atss_r50_fpn_1x_coco.py', # not ready for topk 
        'yolo/yolov3_mobilenetv2_320_300e_coco.py',
        'yolox/yolox_tiny_8x8_300e_coco.py'
    ])
def test_single_stage_forward_gpu(cfg_file):
    if not torch.cuda.is_available():
        import pytest
        pytest.skip('test requires GPU and torch+cuda')

    model = _get_detector_cfg(cfg_file)
    model = _replace_r50_with_r18(model)
    model.backbone.init_cfg = None

    from mmdet.models import build_detector
    detector = build_detector(model)

    input_shape = (2, 3, 128, 128)
    mm_inputs = _demo_mm_inputs(input_shape)

    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')

    detector = detector.cuda()
    imgs = imgs.cuda()
    # Test forward train
    gt_bboxes = [b.cuda() for b in mm_inputs['gt_bboxes']]
    gt_labels = [g.cuda() for g in mm_inputs['gt_labels']]
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)

    # Test forward test
    detector.eval()
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      return_loss=False)
            batch_results.append(result)


def test_faster_rcnn_ohem_forward():
    model = _get_detector_cfg(
        'faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py')
    model = _replace_r50_with_r18(model)
    model.backbone.init_cfg = None

    from mmdet.models import build_detector
    detector = build_detector(model)

    input_shape = (1, 3, 100, 100)

    # Test forward train with a non-empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0

    # Test forward train with an empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0

    # Test RoI forward train with empty proposals
    feature = detector.extract_feat(imgs[0][None, :])
    losses = detector.roi_head.forward_train(
        feature,
        img_metas, [torch.empty((0, 5))],
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels)
    assert isinstance(losses, dict)


@pytest.mark.parametrize(
    'cfg_file',
    [
        # 'cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
        'mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py',
        # 'grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
        # 'ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py',
        # 'htc/htc_r50_fpn_1x_coco.py',
        # 'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py',
        # 'scnet/scnet_r50_fpn_20e_coco.py',
        # 'seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py'  # noqa: E501
    ])
def test_two_stage_forward(cfg_file):
    models_with_semantic = [
        'htc/htc_r50_fpn_1x_coco.py',
        'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py',
        'scnet/scnet_r50_fpn_20e_coco.py',
    ]
    if cfg_file in models_with_semantic:
        with_semantic = True
    else:
        with_semantic = False

    model = _get_detector_cfg(cfg_file)
    model = _replace_r50_with_r18(model)
    model.backbone.init_cfg = None

    # Save cost
    if cfg_file in [
            'seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py'  # noqa: E501
    ]:
        model.roi_head.bbox_head.num_classes = 80
        model.roi_head.bbox_head.loss_cls.num_classes = 80
        model.roi_head.mask_head.num_classes = 80
        model.test_cfg.rcnn.score_thr = 0.05
        model.test_cfg.rcnn.max_per_img = 100

    from mmdet.models import build_detector
    detector = build_detector(model)

    input_shape = (1, 3, 128, 128)

    # Test forward train with a non-empty truth batch
    mm_inputs = _demo_mm_inputs(
        input_shape, num_items=[10], with_semantic=with_semantic)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    losses = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    loss.requires_grad_(True)
    assert float(loss.item()) > 0
    loss.backward()

    # Test forward train with an empty truth batch
    mm_inputs = _demo_mm_inputs(
        input_shape, num_items=[0], with_semantic=with_semantic)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    losses = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    loss.requires_grad_(True)
    assert float(loss.item()) > 0
    loss.backward()

    # Test RoI forward train with empty proposals
    if cfg_file in [
            'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py'  # noqa: E501
    ]:
        mm_inputs.pop('gt_semantic_seg')
    feature = detector.extract_feat(imgs[0][None, :])
    losses = detector.roi_head.forward_train(feature, img_metas,
                                             [torch.empty((0, 5))],
                                             **mm_inputs)
    assert isinstance(losses, dict)

    # Test forward test
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      return_loss=False)
            batch_results.append(result)

    cascade_models = [
        'cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
        'htc/htc_r50_fpn_1x_coco.py',
        'scnet/scnet_r50_fpn_20e_coco.py',
    ]
    # test empty proposal in roi_head
    with torch.no_grad():
        # test no proposal in the whole batch
        detector.simple_test(
            imgs[0][None, :], [img_metas[0]],
            proposals=[torch.empty((0, 4))])

        # test no proposal of aug
        features = detector.extract_feats([imgs[0][None, :]] * 2)
        detector.roi_head.aug_test(features, [torch.empty((0, 4))] * 2,
                                   [[img_metas[0]]] * 2)

        # test rcnn_test_cfg is None
        if cfg_file not in cascade_models:
            feature = detector.extract_feat(imgs[0][None, :])
            bboxes, scores = detector.roi_head.simple_test_bboxes(
                feature, [img_metas[0]], [torch.empty((0, 4))], None)
            assert all([bbox.shape == torch.Size((0, 4)) for bbox in bboxes])
            assert all([
                score.shape == torch.Size(
                    (0, detector.roi_head.bbox_head.fc_cls.out_features))
                for score in scores
            ])

        # test no proposal in some of the images
        x1y1 = torch.randint(1, 100, (10, 2)).float()
        # x2y2 must be greater than x1y1
        x2y2 = x1y1 + torch.randint(1, 100, (10, 2))
        detector.simple_test(
            imgs[0][None, :].repeat(2, 1, 1, 1), [img_metas[0]] * 2,
            proposals=[torch.empty((0, 4)),
                       torch.cat([x1y1, x2y2], dim=-1)])

        # test no proposal of aug
        detector.roi_head.aug_test(
            features,
            [torch.cat([x1y1, x2y2], dim=-1),
             torch.empty((0, 4))], [[img_metas[0]]] * 2)

        # test rcnn_test_cfg is None
        if cfg_file not in cascade_models:
            feature = detector.extract_feat(imgs[0][None, :].repeat(
                2, 1, 1, 1))
            bboxes, scores = detector.roi_head.simple_test_bboxes(
                feature, [img_metas[0]] * 2,
                [torch.empty((0, 4)),
                 torch.cat([x1y1, x2y2], dim=-1)], None)
            assert bboxes[0].shape == torch.Size((0, 4))
            assert scores[0].shape == torch.Size(
                (0, detector.roi_head.bbox_head.fc_cls.out_features))


@pytest.mark.parametrize(
    'cfg_file', ['ghm/retinanet_ghm_r50_fpn_1x_coco.py',
'ssd/ssd300_coco.py']) def test_single_stage_forward_cpu(cfg_file): model = _get_detector_cfg(cfg_file) model = _replace_r50_with_r18(model) model.backbone.init_cfg = None from mmdet.models import build_detector detector = build_detector(model) input_shape = (1, 3, 300, 300) mm_inputs = _demo_mm_inputs(input_shape) imgs = mm_inputs.pop('imgs') img_metas = mm_inputs.pop('img_metas') # Test forward train gt_bboxes = mm_inputs['gt_bboxes'] gt_labels = mm_inputs['gt_labels'] losses = detector.forward( imgs, img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, return_loss=True) assert isinstance(losses, dict) # Test forward test detector.eval() with torch.no_grad(): img_list = [g[None, :] for g in imgs] batch_results = [] for one_img, one_meta in zip(img_list, img_metas): result = detector.forward([one_img], [[one_meta]], return_loss=False) batch_results.append(result) def _demo_mm_inputs(input_shape=(1, 3, 300, 300), num_items=None, num_classes=10, with_semantic=False): # yapf: disable """Create a superset of inputs needed to run test or train batches. Args: input_shape (tuple): input batch dimensions num_items (None | List[int]): specifies the number of boxes in each batch item num_classes (int): number of different labels a box might have """ from mmdet.core import BitmapMasks (N, C, H, W) = input_shape rng = np.random.RandomState(0) imgs = rng.rand(*input_shape) img_metas = [{ 'img_shape': (H, W, C), 'ori_shape': (H, W, C), 'pad_shape': (H, W, C), 'filename': '.png', 'scale_factor': np.array([1.1, 1.2, 1.1, 1.2]), 'flip': False, 'flip_direction': None, } for _ in range(N)] gt_bboxes = [] gt_labels = [] gt_masks = [] for batch_idx in range(N): if num_items is None: num_boxes = rng.randint(1, 10) else: num_boxes = num_items[batch_idx] cx, cy, bw, bh = rng.rand(num_boxes, 4).T tl_x = ((cx * W) - (W * bw / 2)).clip(0, W) tl_y = ((cy * H) - (H * bh / 2)).clip(0, H) br_x = ((cx * W) + (W * bw / 2)).clip(0, W) br_y = ((cy * H) + (H * bh / 2)).clip(0, H) boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T class_idxs = rng.randint(1, num_classes, size=num_boxes) gt_bboxes.append(torch.FloatTensor(boxes)) gt_labels.append(torch.LongTensor(class_idxs)) mask = np.random.randint(0, 2, (len(boxes), H, W), dtype=np.uint8) gt_masks.append(BitmapMasks(mask, H, W)) mm_inputs = { 'imgs': torch.FloatTensor(imgs).requires_grad_(True), 'img_metas': img_metas, 'gt_bboxes': gt_bboxes, 'gt_labels': gt_labels, 'gt_bboxes_ignore': None, 'gt_masks': gt_masks, } if with_semantic: # assume gt_semantic_seg using scale 1/8 of the img gt_semantic_seg = np.random.randint( 0, num_classes, (1, 1, H // 8, W // 8), dtype=np.uint8) mm_inputs.update( {'gt_semantic_seg': torch.ByteTensor(gt_semantic_seg)}) return mm_inputs def test_yolact_forward(): model = _get_detector_cfg('yolact/yolact_r50_1x8_coco.py') model = _replace_r50_with_r18(model) model.backbone.init_cfg = None from mmdet.models import build_detector detector = build_detector(model) input_shape = (1, 3, 100, 100) mm_inputs = _demo_mm_inputs(input_shape) imgs = mm_inputs.pop('imgs') img_metas = mm_inputs.pop('img_metas') # Test forward train detector.train() gt_bboxes = mm_inputs['gt_bboxes'] gt_labels = mm_inputs['gt_labels'] gt_masks = mm_inputs['gt_masks'] losses = detector.forward( imgs, img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, gt_masks=gt_masks, return_loss=True) assert isinstance(losses, dict) # Test forward dummy for get_flops detector.forward_dummy(imgs) # Test forward test detector.eval() with torch.no_grad(): img_list = [g[None, :] for g in imgs] 
batch_results = [] for one_img, one_meta in zip(img_list, img_metas): result = detector.forward([one_img], [[one_meta]], rescale=True, return_loss=False) batch_results.append(result) def test_detr_forward(): model = _get_detector_cfg('detr/detr_r50_8x2_150e_coco.py') model.backbone.depth = 18 model.bbox_head.in_channels = 512 model.backbone.init_cfg = None from mmdet.models import build_detector detector = build_detector(model) input_shape = (1, 3, 100, 100) mm_inputs = _demo_mm_inputs(input_shape) imgs = mm_inputs.pop('imgs') img_metas = mm_inputs.pop('img_metas') # Test forward train with non-empty truth batch detector.train() gt_bboxes = mm_inputs['gt_bboxes'] gt_labels = mm_inputs['gt_labels'] losses = detector.forward( imgs, img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, return_loss=True) assert isinstance(losses, dict) loss, _ = detector._parse_losses(losses) assert float(loss.item()) > 0 # Test forward train with an empty truth batch mm_inputs = _demo_mm_inputs(input_shape, num_items=[0]) imgs = mm_inputs.pop('imgs') img_metas = mm_inputs.pop('img_metas') gt_bboxes = mm_inputs['gt_bboxes'] gt_labels = mm_inputs['gt_labels'] losses = detector.forward( imgs, img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, return_loss=True) assert isinstance(losses, dict) loss, _ = detector._parse_losses(losses) assert float(loss.item()) > 0 # Test forward test detector.eval() with torch.no_grad(): img_list = [g[None, :] for g in imgs] batch_results = [] for one_img, one_meta in zip(img_list, img_metas): result = detector.forward([one_img], [[one_meta]], rescale=True, return_loss=False) batch_results.append(result) def test_inference_detector(): from mmcv import ConfigDict from mmdet.apis import inference_detector from mmdet.models import build_detector # small RetinaNet num_class = 3 model_dict = dict( type='RetinaNet', backbone=dict( type='ResNet', depth=18, num_stages=4, out_indices=(3, ), norm_cfg=dict(type='BN', requires_grad=False), norm_eval=True, style='pytorch'), neck=None, bbox_head=dict( type='RetinaHead', num_classes=num_class, in_channels=512, stacked_convs=1, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5], strides=[32]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), ), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100)) rng = np.random.RandomState(0) img1 = rng.rand(100, 100, 3) img2 = rng.rand(100, 100, 3) model = build_detector(ConfigDict(model_dict)) config = _get_config_module('retinanet/retinanet_r50_fpn_1x_coco.py') model.cfg = config # test single image result = inference_detector(model, img1) assert len(result) == num_class # test multiple image result = inference_detector(model, [img1, img2]) assert len(result) == 2 and len(result[0]) == num_class def test_yolox_random_size(): from mmdet.models import build_detector model = _get_detector_cfg('yolox/yolox_tiny_8x8_300e_coco.py') model.random_size_range = (2, 2) model.input_size = (64, 96) model.random_size_interval = 1 detector = build_detector(model) input_shape = (1, 3, 64, 64) mm_inputs = _demo_mm_inputs(input_shape) imgs = mm_inputs.pop('imgs') img_metas = mm_inputs.pop('img_metas') # Test forward train with non-empty truth batch detector.train() gt_bboxes = mm_inputs['gt_bboxes'] gt_labels = mm_inputs['gt_labels'] detector.forward( imgs, img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, return_loss=True) 
assert detector._input_size == (64, 96) def test_maskformer_forward(): model_cfg = _get_detector_cfg( 'maskformer/maskformer_r50_mstrain_16x1_75e_coco.py') base_channels = 32 model_cfg.backbone.depth = 18 model_cfg.backbone.init_cfg = None model_cfg.backbone.base_channels = base_channels model_cfg.panoptic_head.in_channels = [ base_channels * 2**i for i in range(4) ] model_cfg.panoptic_head.feat_channels = base_channels model_cfg.panoptic_head.out_channels = base_channels model_cfg.panoptic_head.pixel_decoder.encoder.\ transformerlayers.attn_cfgs.embed_dims = base_channels model_cfg.panoptic_head.pixel_decoder.encoder.\ transformerlayers.ffn_cfgs.embed_dims = base_channels model_cfg.panoptic_head.pixel_decoder.encoder.\ transformerlayers.ffn_cfgs.feedforward_channels = base_channels * 8 model_cfg.panoptic_head.pixel_decoder.\ positional_encoding.num_feats = base_channels // 2 model_cfg.panoptic_head.positional_encoding.\ num_feats = base_channels // 2 model_cfg.panoptic_head.transformer_decoder.\ transformerlayers.attn_cfgs.embed_dims = base_channels model_cfg.panoptic_head.transformer_decoder.\ transformerlayers.ffn_cfgs.embed_dims = base_channels model_cfg.panoptic_head.transformer_decoder.\ transformerlayers.ffn_cfgs.feedforward_channels = base_channels * 8 model_cfg.panoptic_head.transformer_decoder.\ transformerlayers.feedforward_channels = base_channels * 8 from mmdet.core import BitmapMasks from mmdet.models import build_detector detector = build_detector(model_cfg) # Test forward train with non-empty truth batch detector.train() img_metas = [ { 'batch_input_shape': (128, 160), 'img_shape': (126, 160, 3), 'ori_shape': (63, 80, 3), 'pad_shape': (128, 160, 3) }, ] img = torch.rand((1, 3, 128, 160)) gt_bboxes = None gt_labels = [ torch.tensor([10]).long(), ] thing_mask1 = np.zeros((1, 128, 160), dtype=np.int32) thing_mask1[0, :50] = 1 gt_masks = [ BitmapMasks(thing_mask1, 128, 160), ] stuff_mask1 = torch.zeros((1, 128, 160)).long() stuff_mask1[0, :50] = 10 stuff_mask1[0, 50:] = 100 gt_semantic_seg = [ stuff_mask1, ] losses = detector.forward( img=img, img_metas=img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, gt_masks=gt_masks, gt_semantic_seg=gt_semantic_seg, return_loss=True) assert isinstance(losses, dict) loss, _ = detector._parse_losses(losses) assert float(loss.item()) > 0 # Test forward train with an empty truth batch gt_bboxes = [ torch.empty((0, 4)).float(), ] gt_labels = [ torch.empty((0, )).long(), ] mask = np.zeros((0, 128, 160), dtype=np.uint8) gt_masks = [ BitmapMasks(mask, 128, 160), ] gt_semantic_seg = [ torch.randint(0, 133, (0, 128, 160)), ] losses = detector.forward( img, img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, gt_masks=gt_masks, gt_semantic_seg=gt_semantic_seg, return_loss=True) assert isinstance(losses, dict) loss, _ = detector._parse_losses(losses) assert float(loss.item()) > 0 # Test forward test detector.eval() with torch.no_grad(): img_list = [g[None, :] for g in img] batch_results = [] for one_img, one_meta in zip(img_list, img_metas): result = detector.forward([one_img], [[one_meta]], rescale=True, return_loss=False) batch_results.append(result) @pytest.mark.parametrize('cfg_file', [ 'mask2former/mask2former_r50_lsj_8x2_50e_coco.py', 'mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py' ]) def test_mask2former_forward(cfg_file): # Test Panoptic Segmentation and Instance Segmentation model_cfg = _get_detector_cfg(cfg_file) base_channels = 32 model_cfg.backbone.depth = 18 model_cfg.backbone.init_cfg = None model_cfg.backbone.base_channels 
= base_channels model_cfg.panoptic_head.in_channels = [ base_channels * 2**i for i in range(4) ] model_cfg.panoptic_head.feat_channels = base_channels model_cfg.panoptic_head.out_channels = base_channels model_cfg.panoptic_head.pixel_decoder.encoder.\ transformerlayers.attn_cfgs.embed_dims = base_channels model_cfg.panoptic_head.pixel_decoder.encoder.\ transformerlayers.ffn_cfgs.embed_dims = base_channels model_cfg.panoptic_head.pixel_decoder.encoder.\ transformerlayers.ffn_cfgs.feedforward_channels = base_channels * 4 model_cfg.panoptic_head.pixel_decoder.\ positional_encoding.num_feats = base_channels // 2 model_cfg.panoptic_head.positional_encoding.\ num_feats = base_channels // 2 model_cfg.panoptic_head.transformer_decoder.\ transformerlayers.attn_cfgs.embed_dims = base_channels model_cfg.panoptic_head.transformer_decoder.\ transformerlayers.ffn_cfgs.embed_dims = base_channels model_cfg.panoptic_head.transformer_decoder.\ transformerlayers.ffn_cfgs.feedforward_channels = base_channels * 8 model_cfg.panoptic_head.transformer_decoder.\ transformerlayers.feedforward_channels = base_channels * 8 num_stuff_classes = model_cfg.panoptic_head.num_stuff_classes from mmdet.core import BitmapMasks from mmdet.models import build_detector detector = build_detector(model_cfg) def _forward_train(): losses = detector.forward( img, img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, gt_masks=gt_masks, gt_semantic_seg=gt_semantic_seg, return_loss=True) assert isinstance(losses, dict) loss, _ = detector._parse_losses(losses) assert float(loss.item()) > 0 # Test forward train with non-empty truth batch detector.train() img_metas = [ { 'batch_input_shape': (128, 160), 'img_shape': (126, 160, 3), 'ori_shape': (63, 80, 3), 'pad_shape': (128, 160, 3) }, ] img = torch.rand((1, 3, 128, 160)) gt_bboxes = None gt_labels = [ torch.tensor([10]).long(), ] thing_mask1 = np.zeros((1, 128, 160), dtype=np.int32) thing_mask1[0, :50] = 1 gt_masks = [ BitmapMasks(thing_mask1, 128, 160), ] stuff_mask1 = torch.zeros((1, 128, 160)).long() stuff_mask1[0, :50] = 10 stuff_mask1[0, 50:] = 100 gt_semantic_seg = [ stuff_mask1, ] _forward_train() # Test forward train with non-empty truth batch and gt_semantic_seg=None gt_semantic_seg = None _forward_train() # Test forward train with an empty truth batch gt_bboxes = [ torch.empty((0, 4)).float(), ] gt_labels = [ torch.empty((0, )).long(), ] mask = np.zeros((0, 128, 160), dtype=np.uint8) gt_masks = [ BitmapMasks(mask, 128, 160), ] gt_semantic_seg = [ torch.randint(0, 133, (0, 128, 160)), ] _forward_train() # Test forward train with an empty truth batch and gt_semantic_seg=None gt_semantic_seg = None _forward_train() # Test forward test detector.eval() with torch.no_grad(): img_list = [g[None, :] for g in img] batch_results = [] for one_img, one_meta in zip(img_list, img_metas): result = detector.forward([one_img], [[one_meta]], rescale=True, return_loss=False) if num_stuff_classes > 0: assert isinstance(result[0], dict) else: assert isinstance(result[0], tuple) batch_results.append(result) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_models/test_loss.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
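# Illustrative sketch (not part of the original suite): the loss tests in this
# file all drive mmdet losses through one calling convention,
#   loss_class()(pred, target, weight, reduction_override=..., avg_factor=...).
# The helper below is a minimal, torch-only sketch of the semantics these
# tests assume (element-wise loss times weight, then 'none'/'mean'/'sum'
# reduction, with avg_factor replacing the denominator of 'mean'); the helper
# name is hypothetical and it is not mmdet's actual implementation.
def _sketch_weighted_l1(pred, target, weight=None, reduction='mean', avg_factor=None):
    assert reduction in (None, 'none', 'mean', 'sum')  # invalid overrides fail here
    reduction = 'mean' if reduction is None else reduction
    loss = (pred - target).abs()  # element-wise L1
    if weight is not None:
        loss = loss * weight  # weight must broadcast against loss
    if reduction == 'none':
        return loss
    if avg_factor is None:
        return loss.sum() if reduction == 'sum' else loss.mean()
    if reduction == 'mean':
        return loss.sum() / avg_factor  # custom normalizer, e.g. number of positives
    raise ValueError('avg_factor cannot be used with reduction="sum"')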
import pytest import torch from mmcv.utils import digit_version from mmdet.models.losses import (BalancedL1Loss, CrossEntropyLoss, DiceLoss, DistributionFocalLoss, FocalLoss, GaussianFocalLoss, KnowledgeDistillationKLDivLoss, L1Loss, MSELoss, QualityFocalLoss, SeesawLoss, SmoothL1Loss, VarifocalLoss) from mmdet.models.losses.ghm_loss import GHMC, GHMR from mmdet.models.losses.iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, GIoULoss, IoULoss) @pytest.mark.parametrize( 'loss_class', [IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss]) def test_iou_type_loss_zeros_weight(loss_class): pred = torch.rand((10, 4)) target = torch.rand((10, 4)) weight = torch.zeros(10) loss = loss_class()(pred, target, weight) assert loss == 0. @pytest.mark.parametrize('loss_class', [ BalancedL1Loss, BoundedIoULoss, CIoULoss, CrossEntropyLoss, DIoULoss, FocalLoss, DistributionFocalLoss, MSELoss, SeesawLoss, GaussianFocalLoss, GIoULoss, IoULoss, L1Loss, QualityFocalLoss, VarifocalLoss, GHMR, GHMC, SmoothL1Loss, KnowledgeDistillationKLDivLoss, DiceLoss ]) def test_loss_with_reduction_override(loss_class): pred = torch.rand((10, 4)) target = torch.rand((10, 4)) weight = None with pytest.raises(AssertionError): # reduction_override must be one of [None, 'none', 'mean', 'sum']; # any other value is rejected reduction_override = True loss_class()( pred, target, weight, reduction_override=reduction_override) @pytest.mark.parametrize('loss_class', [ IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss, MSELoss, L1Loss, SmoothL1Loss, BalancedL1Loss ]) @pytest.mark.parametrize('input_shape', [(10, 4), (0, 4)]) def test_regression_losses(loss_class, input_shape): pred = torch.rand(input_shape) target = torch.rand(input_shape) weight = torch.rand(input_shape) # Test loss forward loss = loss_class()(pred, target) assert isinstance(loss, torch.Tensor) # Test loss forward with weight loss = loss_class()(pred, target, weight) assert isinstance(loss, torch.Tensor) # Test loss forward with reduction_override loss = loss_class()(pred, target, reduction_override='mean') assert isinstance(loss, torch.Tensor) # Test loss forward with avg_factor loss = loss_class()(pred, target, avg_factor=10) assert isinstance(loss, torch.Tensor) with pytest.raises(ValueError): # loss can evaluate with avg_factor only if # reduction is None, 'none' or 'mean'.
reduction_override = 'sum' loss_class()( pred, target, avg_factor=10, reduction_override=reduction_override) # Test loss forward with avg_factor and reduction for reduction_override in [None, 'none', 'mean']: loss = loss_class()( pred, target, avg_factor=10, reduction_override=reduction_override) assert isinstance(loss, torch.Tensor) @pytest.mark.parametrize('loss_class', [FocalLoss, CrossEntropyLoss]) @pytest.mark.parametrize('input_shape', [(10, 5), (0, 5)]) def test_classification_losses(loss_class, input_shape): if input_shape[0] == 0 and digit_version( torch.__version__) < digit_version('1.5.0'): pytest.skip( f'CELoss in PyTorch {torch.__version__} does not support empty ' f'tensor.') pred = torch.rand(input_shape) target = torch.randint(0, 5, (input_shape[0], )) # Test loss forward loss = loss_class()(pred, target) assert isinstance(loss, torch.Tensor) # Test loss forward with reduction_override loss = loss_class()(pred, target, reduction_override='mean') assert isinstance(loss, torch.Tensor) # Test loss forward with avg_factor loss = loss_class()(pred, target, avg_factor=10) assert isinstance(loss, torch.Tensor) with pytest.raises(ValueError): # loss can evaluate with avg_factor only if # reduction is None, 'none' or 'mean'. reduction_override = 'sum' loss_class()( pred, target, avg_factor=10, reduction_override=reduction_override) # Test loss forward with avg_factor and reduction for reduction_override in [None, 'none', 'mean']: loss = loss_class()( pred, target, avg_factor=10, reduction_override=reduction_override) assert isinstance(loss, torch.Tensor) @pytest.mark.parametrize('loss_class', [GHMR]) @pytest.mark.parametrize('input_shape', [(10, 4), (0, 4)]) def test_GHMR_loss(loss_class, input_shape): pred = torch.rand(input_shape) target = torch.rand(input_shape) weight = torch.rand(input_shape) # Test loss forward loss = loss_class()(pred, target, weight) assert isinstance(loss, torch.Tensor) @pytest.mark.parametrize('use_sigmoid', [True, False]) @pytest.mark.parametrize('reduction', ['sum', 'mean', None]) @pytest.mark.parametrize('avg_non_ignore', [True, False]) def test_loss_with_ignore_index(use_sigmoid, reduction, avg_non_ignore): # Test cross_entropy loss loss_class = CrossEntropyLoss( use_sigmoid=use_sigmoid, use_mask=False, ignore_index=255, avg_non_ignore=avg_non_ignore) pred = torch.rand((10, 5)) target = torch.randint(0, 5, (10, )) ignored_indices = torch.randint(0, 10, (2, ), dtype=torch.long) target[ignored_indices] = 255 # Test loss forward with the default ignore_index loss_with_ignore = loss_class(pred, target, reduction_override=reduction) assert isinstance(loss_with_ignore, torch.Tensor) # Test loss forward with an explicitly forwarded ignore_index loss_with_forward_ignore = loss_class( pred, target, ignore_index=255, reduction_override=reduction) assert isinstance(loss_with_forward_ignore, torch.Tensor) # Verify correctness if avg_non_ignore: # manually remove the ignored elements not_ignored_indices = (target != 255) pred = pred[not_ignored_indices] target = target[not_ignored_indices] loss = loss_class(pred, target, reduction_override=reduction) assert torch.allclose(loss, loss_with_ignore) assert torch.allclose(loss, loss_with_forward_ignore) # test ignoring all targets pred = torch.rand((10, 5)) target = torch.ones((10, ), dtype=torch.long) * 255 loss = loss_class(pred, target, reduction_override=reduction) assert loss == 0 @pytest.mark.parametrize('naive_dice', [True, False]) def test_dice_loss(naive_dice): loss_class = DiceLoss pred = torch.rand((10, 4, 4)) target =
torch.rand((10, 4, 4)) weight = torch.rand((10)) # Test loss forward loss = loss_class(naive_dice=naive_dice)(pred, target) assert isinstance(loss, torch.Tensor) # Test loss forward with weight loss = loss_class(naive_dice=naive_dice)(pred, target, weight) assert isinstance(loss, torch.Tensor) # Test loss forward with reduction_override loss = loss_class(naive_dice=naive_dice)( pred, target, reduction_override='mean') assert isinstance(loss, torch.Tensor) # Test loss forward with avg_factor loss = loss_class(naive_dice=naive_dice)(pred, target, avg_factor=10) assert isinstance(loss, torch.Tensor) with pytest.raises(ValueError): # loss can evaluate with avg_factor only if # reduction is None, 'none' or 'mean'. reduction_override = 'sum' loss_class(naive_dice=naive_dice)( pred, target, avg_factor=10, reduction_override=reduction_override) # Test loss forward with avg_factor and reduction for reduction_override in [None, 'none', 'mean']: loss = loss_class(naive_dice=naive_dice)( pred, target, avg_factor=10, reduction_override=reduction_override) assert isinstance(loss, torch.Tensor) # Test loss forward with activate=True and use_sigmoid=False (not implemented) with pytest.raises(NotImplementedError): loss_class( use_sigmoid=False, activate=True, naive_dice=naive_dice)(pred, target) # Test loss forward with weight.ndim != loss.ndim with pytest.raises(AssertionError): weight = torch.rand((2, 8)) loss_class(naive_dice=naive_dice)(pred, target, weight) # Test loss forward with len(weight) != len(pred) with pytest.raises(AssertionError): weight = torch.rand((8)) loss_class(naive_dice=naive_dice)(pred, target, weight) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_models/test_loss_compatibility.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. """pytest tests/test_loss_compatibility.py.""" import copy from os.path import dirname, exists, join import numpy as np import pytest import torch def _get_config_directory(): """Find the predefined detector config directory.""" try: # Assume we are running in the source mmdetection repo repo_dpath = dirname(dirname(dirname(__file__))) except NameError: # For IPython development when this __file__ is not defined import mmdet repo_dpath = dirname(dirname(mmdet.__file__)) config_dpath = join(repo_dpath, 'configs') if not exists(config_dpath): raise Exception('Cannot find config path') return config_dpath def _get_config_module(fname): """Load a configuration as a python module.""" from mmcv import Config config_dpath = _get_config_directory() config_fpath = join(config_dpath, fname) config_mod = Config.fromfile(config_fpath) return config_mod def _get_detector_cfg(fname): """Grab configs necessary to create a detector. These are deep copied to allow for safe modification of parameters without influencing other tests. """ config = _get_config_module(fname) model = copy.deepcopy(config.model) return model @pytest.mark.parametrize('loss_bbox', [ dict(type='L1Loss', loss_weight=1.0), dict(type='GHMR', mu=0.02, bins=10, momentum=0.7, loss_weight=10.0), dict(type='IoULoss', loss_weight=1.0), dict(type='BoundedIoULoss', loss_weight=1.0), dict(type='GIoULoss', loss_weight=1.0), dict(type='DIoULoss', loss_weight=1.0), dict(type='CIoULoss', loss_weight=1.0), dict(type='MSELoss', loss_weight=1.0), dict(type='SmoothL1Loss', loss_weight=1.0), dict(type='BalancedL1Loss', loss_weight=1.0) ]) def test_bbox_loss_compatibility(loss_bbox): """Test loss_bbox compatibility.
Using Faster R-CNN as an example, modify the loss function in the config to verify the compatibility of the loss APIs. """ # Faster R-CNN config dict config_path = '_base_/models/faster_rcnn_r50_fpn.py' cfg_model = _get_detector_cfg(config_path) input_shape = (1, 3, 256, 256) mm_inputs = _demo_mm_inputs(input_shape, num_items=[10]) imgs = mm_inputs.pop('imgs') img_metas = mm_inputs.pop('img_metas') if 'IoULoss' in loss_bbox['type']: cfg_model.roi_head.bbox_head.reg_decoded_bbox = True cfg_model.roi_head.bbox_head.loss_bbox = loss_bbox from mmdet.models import build_detector detector = build_detector(cfg_model) loss = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs) assert isinstance(loss, dict) loss, _ = detector._parse_losses(loss) assert float(loss.item()) > 0 @pytest.mark.parametrize('loss_cls', [ dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), dict( type='GHMC', bins=30, momentum=0.75, use_sigmoid=True, loss_weight=1.0) ]) def test_cls_loss_compatibility(loss_cls): """Test loss_cls compatibility. Using Faster R-CNN as an example, modify the loss function in the config to verify the compatibility of the loss APIs. """ # Faster R-CNN config dict config_path = '_base_/models/faster_rcnn_r50_fpn.py' cfg_model = _get_detector_cfg(config_path) input_shape = (1, 3, 256, 256) mm_inputs = _demo_mm_inputs(input_shape, num_items=[10]) imgs = mm_inputs.pop('imgs') img_metas = mm_inputs.pop('img_metas') # verify classification loss function compatibility cfg_model.roi_head.bbox_head.loss_cls = loss_cls from mmdet.models import build_detector detector = build_detector(cfg_model) loss = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs) assert isinstance(loss, dict) loss, _ = detector._parse_losses(loss) assert float(loss.item()) > 0 def _demo_mm_inputs(input_shape=(1, 3, 300, 300), num_items=None, num_classes=10, with_semantic=False): # yapf: disable """Create a superset of inputs needed to run test or train batches.
Args: input_shape (tuple): input batch dimensions num_items (None | List[int]): specifies the number of boxes in each batch item num_classes (int): number of different labels a box might have """ from mmdet.core import BitmapMasks (N, C, H, W) = input_shape rng = np.random.RandomState(0) imgs = rng.rand(*input_shape) img_metas = [{ 'img_shape': (H, W, C), 'ori_shape': (H, W, C), 'pad_shape': (H, W, C), 'filename': '.png', 'scale_factor': np.array([1.1, 1.2, 1.1, 1.2]), 'flip': False, 'flip_direction': None, } for _ in range(N)] gt_bboxes = [] gt_labels = [] gt_masks = [] for batch_idx in range(N): if num_items is None: num_boxes = rng.randint(1, 10) else: num_boxes = num_items[batch_idx] cx, cy, bw, bh = rng.rand(num_boxes, 4).T tl_x = ((cx * W) - (W * bw / 2)).clip(0, W) tl_y = ((cy * H) - (H * bh / 2)).clip(0, H) br_x = ((cx * W) + (W * bw / 2)).clip(0, W) br_y = ((cy * H) + (H * bh / 2)).clip(0, H) boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T class_idxs = rng.randint(1, num_classes, size=num_boxes) gt_bboxes.append(torch.FloatTensor(boxes)) gt_labels.append(torch.LongTensor(class_idxs)) mask = np.random.randint(0, 2, (len(boxes), H, W), dtype=np.uint8) gt_masks.append(BitmapMasks(mask, H, W)) mm_inputs = { 'imgs': torch.FloatTensor(imgs).requires_grad_(True), 'img_metas': img_metas, 'gt_bboxes': gt_bboxes, 'gt_labels': gt_labels, 'gt_bboxes_ignore': None, 'gt_masks': gt_masks, } if with_semantic: # assume gt_semantic_seg using scale 1/8 of the img gt_semantic_seg = np.random.randint( 0, num_classes, (1, 1, H // 8, W // 8), dtype=np.uint8) mm_inputs.update( {'gt_semantic_seg': torch.ByteTensor(gt_semantic_seg)}) return mm_inputs ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_models/test_necks.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
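# Illustrative usage sketch for the neck tests below (not part of the
# original suite). Necks consume one feature map per backbone stage, highest
# resolution first, and emit `num_outs` maps with a shared channel count; the
# constants mirror those used in test_fpn, and the helper name is hypothetical.
def _sketch_fpn_shapes():
    import torch
    from mmdet.models.necks import FPN
    s, in_channels = 64, [8, 16, 32, 64]
    feats = [torch.rand(1, c, s // 2**i, s // 2**i) for i, c in enumerate(in_channels)]
    neck = FPN(in_channels=in_channels, out_channels=8, start_level=1,
               add_extra_convs='on_input', num_outs=5)
    outs = neck(feats)
    # start_level=1 drops the finest level, so output i sits at stride 2**(i + 1)
    assert len(outs) == 5 and all(o.shape[1] == 8 for o in outs)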
import pytest import torch from torch.nn.modules.batchnorm import _BatchNorm from mmdet.models.necks import (FPG, FPN, FPN_CARAFE, NASFCOS_FPN, NASFPN, YOLOXPAFPN, ChannelMapper, CTResNetNeck, DilatedEncoder, DyHead, SSDNeck, YOLOV3Neck) def test_fpn(): """Tests fpn.""" s = 64 in_channels = [8, 16, 32, 64] feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8] out_channels = 8 # end_level=-1 is equal to end_level=3 FPN(in_channels=in_channels, out_channels=out_channels, start_level=0, end_level=-1, num_outs=5) FPN(in_channels=in_channels, out_channels=out_channels, start_level=0, end_level=3, num_outs=5) # `num_outs` is not equal to end_level - start_level + 1 with pytest.raises(AssertionError): FPN(in_channels=in_channels, out_channels=out_channels, start_level=1, end_level=2, num_outs=3) # `num_outs` is not equal to len(in_channels) - start_level with pytest.raises(AssertionError): FPN(in_channels=in_channels, out_channels=out_channels, start_level=1, num_outs=2) # `end_level` is larger than len(in_channels) - 1 with pytest.raises(AssertionError): FPN(in_channels=in_channels, out_channels=out_channels, start_level=1, end_level=4, num_outs=2) # `num_outs` is not equal to end_level - start_level with pytest.raises(AssertionError): FPN(in_channels=in_channels, out_channels=out_channels, start_level=1, end_level=3, num_outs=1) # Invalid `add_extra_convs` option with pytest.raises(AssertionError): FPN(in_channels=in_channels, out_channels=out_channels, start_level=1, add_extra_convs='on_xxx', num_outs=5) fpn_model = FPN( in_channels=in_channels, out_channels=out_channels, start_level=1, add_extra_convs=True, num_outs=5) # FPN expects multiple levels of features per image feats = [ torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) for i in range(len(in_channels)) ] outs = fpn_model(feats) assert fpn_model.add_extra_convs == 'on_input' assert len(outs) == fpn_model.num_outs for i in range(fpn_model.num_outs): assert outs[i].shape[1] == out_channels # start_level=1, so output i sits at stride 2**(i + 1) assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1)) # Tests for fpn with no extra convs (pooling is used instead) fpn_model = FPN( in_channels=in_channels, out_channels=out_channels, start_level=1, add_extra_convs=False, num_outs=5) outs = fpn_model(feats) assert len(outs) == fpn_model.num_outs assert not fpn_model.add_extra_convs for i in range(fpn_model.num_outs): assert outs[i].shape[1] == out_channels assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1)) # Tests for fpn with lateral bns fpn_model = FPN( in_channels=in_channels, out_channels=out_channels, start_level=1, add_extra_convs=True, no_norm_on_lateral=False, norm_cfg=dict(type='BN', requires_grad=True), num_outs=5) outs = fpn_model(feats) assert len(outs) == fpn_model.num_outs assert fpn_model.add_extra_convs == 'on_input' for i in range(fpn_model.num_outs): assert outs[i].shape[1] == out_channels assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1)) bn_exist = False for m in fpn_model.modules(): if isinstance(m, _BatchNorm): bn_exist = True assert bn_exist # Bilinear upsample fpn_model = FPN( in_channels=in_channels, out_channels=out_channels, start_level=1, add_extra_convs=True, upsample_cfg=dict(mode='bilinear', align_corners=True), num_outs=5) outs = fpn_model(feats) assert len(outs) == fpn_model.num_outs assert fpn_model.add_extra_convs == 'on_input' for i in range(fpn_model.num_outs): assert outs[i].shape[1] == out_channels assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1)) # Use a scale factor instead of a fixed upsample size fpn_model = FPN( in_channels=in_channels, out_channels=out_channels, start_level=1, add_extra_convs=True, upsample_cfg=dict(scale_factor=2), num_outs=5) outs = fpn_model(feats) assert len(outs) == fpn_model.num_outs for i in range(fpn_model.num_outs): assert outs[i].shape[1] == out_channels assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1)) # Extra convs source is 'inputs' fpn_model = FPN( in_channels=in_channels, out_channels=out_channels, add_extra_convs='on_input', start_level=1, num_outs=5) assert fpn_model.add_extra_convs == 'on_input' outs = fpn_model(feats) assert len(outs) == fpn_model.num_outs for i in range(fpn_model.num_outs): assert outs[i].shape[1] == out_channels assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1)) # Extra convs source is 'laterals' fpn_model = FPN( in_channels=in_channels, out_channels=out_channels, add_extra_convs='on_lateral', start_level=1, num_outs=5) assert fpn_model.add_extra_convs == 'on_lateral' outs = fpn_model(feats) assert len(outs) == fpn_model.num_outs for i in range(fpn_model.num_outs): assert outs[i].shape[1] == out_channels assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1)) # Extra convs source is 'outputs' fpn_model = FPN( in_channels=in_channels, out_channels=out_channels, add_extra_convs='on_output', start_level=1, num_outs=5) assert fpn_model.add_extra_convs == 'on_output' outs = fpn_model(feats) assert len(outs) == fpn_model.num_outs for i in range(fpn_model.num_outs): assert outs[i].shape[1] == out_channels assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1)) def test_channel_mapper(): """Tests ChannelMapper.""" s = 64 in_channels = [8, 16, 32, 64] feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8] out_channels = 8 kernel_size = 3 feats = [ torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) for i in range(len(in_channels)) ] # in_channels must be a list with pytest.raises(AssertionError): channel_mapper = ChannelMapper( in_channels=10, out_channels=out_channels, kernel_size=kernel_size) # the length of channel_mapper's inputs must be equal to the length of # in_channels with pytest.raises(AssertionError): channel_mapper = ChannelMapper( in_channels=in_channels[:-1], out_channels=out_channels, kernel_size=kernel_size) channel_mapper(feats) channel_mapper = ChannelMapper( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size) outs = channel_mapper(feats) assert len(outs) == len(feats) for i in range(len(feats)): assert outs[i].shape[1] == out_channels assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i) def test_dilated_encoder(): in_channels = 16 out_channels = 32 out_shape = 34 dilated_encoder = DilatedEncoder(in_channels, out_channels, 16, 2, [2, 4, 6, 8]) feat = [torch.rand(1, in_channels, 34, 34)] out_feat = dilated_encoder(feat)[0] assert out_feat.shape == (1, out_channels, out_shape, out_shape) def test_ct_resnet_neck(): # num_filters/num_kernels must be a list with pytest.raises(TypeError): CTResNetNeck( in_channel=10, num_deconv_filters=10, num_deconv_kernels=4) # num_filters/num_kernels must have the same length with pytest.raises(AssertionError): CTResNetNeck( in_channel=10, num_deconv_filters=(10, 10), num_deconv_kernels=(4, )) in_channels = 16 num_filters = (8, 8) num_kernels = (4, 4) feat = torch.rand(1, 16, 4, 4) ct_resnet_neck = CTResNetNeck( in_channel=in_channels, num_deconv_filters=num_filters, num_deconv_kernels=num_kernels, use_dcn=False) # feat must be a list or tuple with pytest.raises(AssertionError): ct_resnet_neck(feat) out_feat = ct_resnet_neck([feat])[0] assert out_feat.shape == (1, num_filters[-1], 16, 16) if torch.cuda.is_available(): # test dcn
ct_resnet_neck = CTResNetNeck( in_channel=in_channels, num_deconv_filters=num_filters, num_deconv_kernels=num_kernels) ct_resnet_neck = ct_resnet_neck.cuda() feat = feat.cuda() out_feat = ct_resnet_neck([feat])[0] assert out_feat.shape == (1, num_filters[-1], 16, 16) def test_yolov3_neck(): # num_scales, in_channels and out_channels must have the same length with pytest.raises(AssertionError): YOLOV3Neck(num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4]) # len(feats) must be equal to num_scales with pytest.raises(AssertionError): neck = YOLOV3Neck( num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4, 2]) feats = (torch.rand(1, 4, 16, 16), torch.rand(1, 8, 16, 16)) neck(feats) # test normal channels s = 32 in_channels = [16, 8, 4] out_channels = [8, 4, 2] feat_sizes = [s // 2**i for i in range(len(in_channels) - 1, -1, -1)] feats = [ torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) for i in range(len(in_channels) - 1, -1, -1) ] neck = YOLOV3Neck( num_scales=3, in_channels=in_channels, out_channels=out_channels) outs = neck(feats) assert len(outs) == len(feats) for i in range(len(outs)): assert outs[i].shape == \ (1, out_channels[i], feat_sizes[i], feat_sizes[i]) # test a more flexible setting s = 32 in_channels = [32, 8, 16] out_channels = [19, 21, 5] feat_sizes = [s // 2**i for i in range(len(in_channels) - 1, -1, -1)] feats = [ torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) for i in range(len(in_channels) - 1, -1, -1) ] neck = YOLOV3Neck( num_scales=3, in_channels=in_channels, out_channels=out_channels) outs = neck(feats) assert len(outs) == len(feats) for i in range(len(outs)): assert outs[i].shape == \ (1, out_channels[i], feat_sizes[i], feat_sizes[i]) def test_ssd_neck(): # level_strides/level_paddings must have the same length with pytest.raises(AssertionError): SSDNeck( in_channels=[8, 16], out_channels=[8, 16, 32], level_strides=[2], level_paddings=[2, 1]) # out_channels must be longer than in_channels with pytest.raises(AssertionError): SSDNeck( in_channels=[8, 16], out_channels=[8], level_strides=[2], level_paddings=[2]) # len(out_channels) - len(in_channels) must be equal to len(level_strides) with pytest.raises(AssertionError): SSDNeck( in_channels=[8, 16], out_channels=[4, 16, 64], level_strides=[2, 2], level_paddings=[2, 2]) # in_channels must be the same as out_channels[:len(in_channels)] with pytest.raises(AssertionError): SSDNeck( in_channels=[8, 16], out_channels=[4, 16, 64], level_strides=[2], level_paddings=[2]) ssd_neck = SSDNeck( in_channels=[4], out_channels=[4, 8, 16], level_strides=[2, 1], level_paddings=[1, 0]) feats = (torch.rand(1, 4, 16, 16), ) outs = ssd_neck(feats) assert outs[0].shape == (1, 4, 16, 16) assert outs[1].shape == (1, 8, 8, 8) assert outs[2].shape == (1, 16, 6, 6) # test SSD-Lite Neck ssd_neck = SSDNeck( in_channels=[4, 8], out_channels=[4, 8, 16], level_strides=[1], level_paddings=[1], l2_norm_scale=None, use_depthwise=True, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU6')) assert not hasattr(ssd_neck, 'l2_norm') from mmcv.cnn.bricks import DepthwiseSeparableConvModule assert isinstance(ssd_neck.extra_layers[0][-1], DepthwiseSeparableConvModule) feats = (torch.rand(1, 4, 8, 8), torch.rand(1, 8, 8, 8)) outs = ssd_neck(feats) assert outs[0].shape == (1, 4, 8, 8) assert outs[1].shape == (1, 8, 8, 8) assert outs[2].shape == (1, 16, 8, 8) def test_yolox_pafpn(): s = 64 in_channels = [8, 16, 32, 64] feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8] out_channels = 24 feats = [ torch.rand(1, in_channels[i],
feat_sizes[i], feat_sizes[i]) for i in range(len(in_channels)) ] neck = YOLOXPAFPN(in_channels=in_channels, out_channels=out_channels) outs = neck(feats) assert len(outs) == len(feats) for i in range(len(feats)): assert outs[i].shape[1] == out_channels assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i) # test depth-wise neck = YOLOXPAFPN( in_channels=in_channels, out_channels=out_channels, use_depthwise=True) from mmcv.cnn.bricks import DepthwiseSeparableConvModule assert isinstance(neck.downsamples[0], DepthwiseSeparableConvModule) outs = neck(feats) assert len(outs) == len(feats) for i in range(len(feats)): assert outs[i].shape[1] == out_channels assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i) def test_dyhead(): s = 64 in_channels = 8 out_channels = 16 feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8] feats = [ torch.rand(1, in_channels, feat_sizes[i], feat_sizes[i]) for i in range(len(feat_sizes)) ] neck = DyHead( in_channels=in_channels, out_channels=out_channels, num_blocks=3) outs = neck(feats) assert len(outs) == len(feats) for i in range(len(outs)): assert outs[i].shape[1] == out_channels assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i) feat = torch.rand(1, 8, 4, 4) # input feat must be tuple or list with pytest.raises(AssertionError): neck(feat) def test_fpg(): # end_level=-1 is equal to end_level=3 norm_cfg = dict(type='BN', requires_grad=True) FPG(in_channels=[8, 16, 32, 64], out_channels=8, inter_channels=8, num_outs=5, add_extra_convs=True, start_level=1, end_level=-1, stack_times=9, paths=['bu'] * 9, same_down_trans=None, same_up_trans=dict( type='conv', kernel_size=3, stride=2, padding=1, norm_cfg=norm_cfg, inplace=False, order=('act', 'conv', 'norm')), across_lateral_trans=dict( type='conv', kernel_size=1, norm_cfg=norm_cfg, inplace=False, order=('act', 'conv', 'norm')), across_down_trans=dict( type='interpolation_conv', mode='nearest', kernel_size=3, norm_cfg=norm_cfg, order=('act', 'conv', 'norm'), inplace=False), across_up_trans=None, across_skip_trans=dict( type='conv', kernel_size=1, norm_cfg=norm_cfg, inplace=False, order=('act', 'conv', 'norm')), output_trans=dict( type='last_conv', kernel_size=3, order=('act', 'conv', 'norm'), inplace=False), norm_cfg=norm_cfg, skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()]) FPG(in_channels=[8, 16, 32, 64], out_channels=8, inter_channels=8, num_outs=5, add_extra_convs=True, start_level=1, end_level=3, stack_times=9, paths=['bu'] * 9, same_down_trans=None, same_up_trans=dict( type='conv', kernel_size=3, stride=2, padding=1, norm_cfg=norm_cfg, inplace=False, order=('act', 'conv', 'norm')), across_lateral_trans=dict( type='conv', kernel_size=1, norm_cfg=norm_cfg, inplace=False, order=('act', 'conv', 'norm')), across_down_trans=dict( type='interpolation_conv', mode='nearest', kernel_size=3, norm_cfg=norm_cfg, order=('act', 'conv', 'norm'), inplace=False), across_up_trans=None, across_skip_trans=dict( type='conv', kernel_size=1, norm_cfg=norm_cfg, inplace=False, order=('act', 'conv', 'norm')), output_trans=dict( type='last_conv', kernel_size=3, order=('act', 'conv', 'norm'), inplace=False), norm_cfg=norm_cfg, skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()]) # `end_level` is larger than len(in_channels) - 1 with pytest.raises(AssertionError): FPG(in_channels=[8, 16, 32, 64], out_channels=8, stack_times=9, paths=['bu'] * 9, start_level=1, end_level=4, num_outs=2, skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()]) # `num_outs` is not equal to end_level - start_level + 1 with 
pytest.raises(AssertionError): FPG(in_channels=[8, 16, 32, 64], out_channels=8, stack_times=9, paths=['bu'] * 9, start_level=1, end_level=2, num_outs=3, skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()]) def test_fpn_carafe(): # end_level=-1 is equal to end_level=3 FPN_CARAFE( in_channels=[8, 16, 32, 64], out_channels=8, start_level=0, end_level=3, num_outs=4) FPN_CARAFE( in_channels=[8, 16, 32, 64], out_channels=8, start_level=0, end_level=-1, num_outs=4) # `end_level` is larger than len(in_channels) - 1 with pytest.raises(AssertionError): FPN_CARAFE( in_channels=[8, 16, 32, 64], out_channels=8, start_level=1, end_level=4, num_outs=2) # `num_outs` is not equal to end_level - start_level + 1 with pytest.raises(AssertionError): FPN_CARAFE( in_channels=[8, 16, 32, 64], out_channels=8, start_level=1, end_level=2, num_outs=3) def test_nas_fpn(): # end_level=-1 is equal to end_level=3 NASFPN( in_channels=[8, 16, 32, 64], out_channels=8, stack_times=9, start_level=0, end_level=3, num_outs=4) NASFPN( in_channels=[8, 16, 32, 64], out_channels=8, stack_times=9, start_level=0, end_level=-1, num_outs=4) # `end_level` is larger than len(in_channels) - 1 with pytest.raises(AssertionError): NASFPN( in_channels=[8, 16, 32, 64], out_channels=8, stack_times=9, start_level=1, end_level=4, num_outs=2) # `num_outs` is not equal to end_level - start_level + 1 with pytest.raises(AssertionError): NASFPN( in_channels=[8, 16, 32, 64], out_channels=8, stack_times=9, start_level=1, end_level=2, num_outs=3) def test_nasfcos_fpn(): # end_level=-1 is equal to end_level=3 NASFCOS_FPN( in_channels=[8, 16, 32, 64], out_channels=8, start_level=0, end_level=3, num_outs=4) NASFCOS_FPN( in_channels=[8, 16, 32, 64], out_channels=8, start_level=0, end_level=-1, num_outs=4) # `end_level` is larger than len(in_channels) - 1 with pytest.raises(AssertionError): NASFCOS_FPN( in_channels=[8, 16, 32, 64], out_channels=8, start_level=1, end_level=4, num_outs=2) # `num_outs` is not equal to end_level - start_level + 1 with pytest.raises(AssertionError): NASFCOS_FPN( in_channels=[8, 16, 32, 64], out_channels=8, start_level=1, end_level=2, num_outs=3) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_models/test_plugins.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
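# Illustrative sketch (not part of the original suite): DropBlock, exercised
# below, zeroes contiguous square regions rather than independent activations.
# A torch-only sketch of the core idea for a 4-D (N, C, H, W) feature map;
# it omits DropBlock's warmup schedule and the exact border-corrected seed
# rate, and the helper name is hypothetical.
def _sketch_dropblock_idea(feat, drop_prob=0.5, block_size=3):
    import torch
    import torch.nn.functional as F
    gamma = drop_prob / block_size**2  # approximate per-pixel seed rate
    seeds = torch.bernoulli(torch.full_like(feat, gamma))
    # dilate each seed into a block_size x block_size zeroed region
    mask = 1 - (F.max_pool2d(seeds, block_size, stride=1,
                             padding=block_size // 2) > 0).float()
    # re-normalize so the expected activation magnitude is preserved
    return feat * mask * mask.numel() / mask.sum().clamp(min=1)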
import pytest import torch from mmcv import ConfigDict from mmcv.cnn import build_plugin_layer from mmdet.models.plugins import DropBlock def test_dropblock(): feat = torch.rand(1, 1, 11, 11) drop_prob = 1.0 dropblock = DropBlock(drop_prob, block_size=11, warmup_iters=0) out_feat = dropblock(feat) assert (out_feat == 0).all() and out_feat.shape == feat.shape drop_prob = 0.5 dropblock = DropBlock(drop_prob, block_size=5, warmup_iters=0) out_feat = dropblock(feat) assert out_feat.shape == feat.shape # drop_prob must be (0,1] with pytest.raises(AssertionError): DropBlock(1.5, 3) # block_size cannot be an even number with pytest.raises(AssertionError): DropBlock(0.5, 2) # warmup_iters cannot be less than 0 with pytest.raises(AssertionError): DropBlock(0.5, 3, -1) def test_pixel_decoder(): base_channels = 64 pixel_decoder_cfg = ConfigDict( dict( type='PixelDecoder', in_channels=[base_channels * 2**i for i in range(4)], feat_channels=base_channels, out_channels=base_channels, norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='ReLU'))) self = build_plugin_layer(pixel_decoder_cfg)[1] img_metas = [{}, {}] feats = [ torch.rand((2, base_channels * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i))) for i in range(4) ] mask_feature, memory = self(feats, img_metas) assert (memory == feats[-1]).all() assert mask_feature.shape == feats[0].shape def test_transformer_encoder_pixel_decoder(): base_channels = 64 pixel_decoder_cfg = ConfigDict( dict( type='TransformerEncoderPixelDecoder', in_channels=[base_channels * 2**i for i in range(4)], feat_channels=base_channels, out_channels=base_channels, norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='ReLU'), encoder=dict( type='DetrTransformerEncoder', num_layers=6, transformerlayers=dict( type='BaseTransformerLayer', attn_cfgs=dict( type='MultiheadAttention', embed_dims=base_channels, num_heads=8, attn_drop=0.1, proj_drop=0.1, dropout_layer=None, batch_first=False), ffn_cfgs=dict( embed_dims=base_channels, feedforward_channels=base_channels * 8, num_fcs=2, act_cfg=dict(type='ReLU', inplace=True), ffn_drop=0.1, dropout_layer=None, add_identity=True), operation_order=('self_attn', 'norm', 'ffn', 'norm'), norm_cfg=dict(type='LN'), init_cfg=None, batch_first=False), init_cfg=None), positional_encoding=dict( type='SinePositionalEncoding', num_feats=base_channels // 2, normalize=True))) self = build_plugin_layer(pixel_decoder_cfg)[1] img_metas = [{ 'batch_input_shape': (128, 160), 'img_shape': (120, 160, 3), }, { 'batch_input_shape': (128, 160), 'img_shape': (125, 160, 3), }] feats = [ torch.rand((2, base_channels * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i))) for i in range(4) ] mask_feature, memory = self(feats, img_metas) assert memory.shape[-2:] == feats[-1].shape[-2:] assert mask_feature.shape == feats[0].shape def test_msdeformattn_pixel_decoder(): base_channels = 64 pixel_decoder_cfg = ConfigDict( dict( type='MSDeformAttnPixelDecoder', in_channels=[base_channels * 2**i for i in range(4)], strides=[4, 8, 16, 32], feat_channels=base_channels, out_channels=base_channels, num_outs=3, norm_cfg=dict(type='GN', num_groups=32), act_cfg=dict(type='ReLU'), encoder=dict( type='DetrTransformerEncoder', num_layers=6, transformerlayers=dict( type='BaseTransformerLayer', attn_cfgs=dict( type='MultiScaleDeformableAttention', embed_dims=base_channels, num_heads=8, num_levels=3, num_points=4, im2col_step=64, dropout=0.0, batch_first=False, norm_cfg=None, init_cfg=None), ffn_cfgs=dict( type='FFN', embed_dims=base_channels, feedforward_channels=base_channels * 4, num_fcs=2, 
ffn_drop=0.0, act_cfg=dict(type='ReLU', inplace=True)), operation_order=('self_attn', 'norm', 'ffn', 'norm')), init_cfg=None), positional_encoding=dict( type='SinePositionalEncoding', num_feats=base_channels // 2, normalize=True), init_cfg=None), ) self = build_plugin_layer(pixel_decoder_cfg)[1] feats = [ torch.rand((2, base_channels * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i))) for i in range(4) ] mask_feature, multi_scale_features = self(feats) assert mask_feature.shape == feats[0].shape assert len(multi_scale_features) == 3 multi_scale_features = multi_scale_features[::-1] for i in range(3): assert multi_scale_features[i].shape[-2:] == feats[i + 1].shape[-2:] ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_models/test_roi_heads/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .utils import _dummy_bbox_sampling __all__ = ['_dummy_bbox_sampling'] ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_models/test_roi_heads/test_bbox_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import mmcv import numpy as np import pytest import torch from mmdet.core import bbox2roi from mmdet.models.roi_heads.bbox_heads import BBoxHead from .utils import _dummy_bbox_sampling def test_bbox_head_loss(): """Tests bbox head loss when truth is empty and non-empty.""" self = BBoxHead(in_channels=8, roi_feat_size=3) # Dummy proposals proposal_list = [ torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]), ] target_cfg = mmcv.Config(dict(pos_weight=1)) # Test bbox loss when truth is empty gt_bboxes = [torch.empty((0, 4))] gt_labels = [torch.LongTensor([])] sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes, gt_labels) bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels, target_cfg) labels, label_weights, bbox_targets, bbox_weights = bbox_targets # Create dummy features "extracted" for each sampled bbox num_sampled = sum(len(res.bboxes) for res in sampling_results) rois = bbox2roi([res.bboxes for res in sampling_results]) dummy_feats = torch.rand(num_sampled, 8 * 3 * 3) cls_scores, bbox_preds = self.forward(dummy_feats) losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights, bbox_targets, bbox_weights) assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero' assert losses.get('loss_bbox', 0) == 0, 'empty gt loss should be zero' # Test bbox loss when truth is non-empty gt_bboxes = [ torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]), ] gt_labels = [torch.LongTensor([2])] sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes, gt_labels) rois = bbox2roi([res.bboxes for res in sampling_results]) bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels, target_cfg) labels, label_weights, bbox_targets, bbox_weights = bbox_targets # Create dummy features "extracted" for each sampled bbox num_sampled = sum(len(res.bboxes) for res in sampling_results) dummy_feats = torch.rand(num_sampled, 8 * 3 * 3) cls_scores, bbox_preds = self.forward(dummy_feats) losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights, bbox_targets, bbox_weights) assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero' assert losses.get('loss_bbox', 0) > 0, 'box-loss should be non-zero' @pytest.mark.parametrize('num_sample', [0, 1, 2]) def test_bbox_head_get_bboxes(num_sample): self = BBoxHead(reg_class_agnostic=True) num_class = 6 rois = 
torch.rand((num_sample, 5)) cls_score = torch.rand((num_sample, num_class)) bbox_pred = torch.rand((num_sample, 4)) scale_factor = np.array([2.0, 2.0, 2.0, 2.0]) det_bboxes, det_labels = self.get_bboxes( rois, cls_score, bbox_pred, None, scale_factor, rescale=True) if num_sample == 0: assert len(det_bboxes) == 0 and len(det_labels) == 0 else: assert det_bboxes.shape == bbox_pred.shape assert det_labels.shape == cls_score.shape def test_refine_boxes(): """Mirrors the doctest in ``mmdet.models.bbox_heads.bbox_head.BBoxHead.refine_boxes`` but checks for multiple values of n_roi / n_img.""" self = BBoxHead(reg_class_agnostic=True) test_settings = [ # Corner case: fewer rois than images { 'n_roi': 2, 'n_img': 4, 'rng': 34285940 }, # Corner case: no images { 'n_roi': 0, 'n_img': 0, 'rng': 52925222 }, # Corner cases: few images / rois { 'n_roi': 1, 'n_img': 1, 'rng': 1200281 }, { 'n_roi': 2, 'n_img': 1, 'rng': 1200282 }, { 'n_roi': 2, 'n_img': 2, 'rng': 1200283 }, { 'n_roi': 1, 'n_img': 2, 'rng': 1200284 }, # Corner case: no rois, few images { 'n_roi': 0, 'n_img': 1, 'rng': 23955860 }, { 'n_roi': 0, 'n_img': 2, 'rng': 25830516 }, # Corner case: no rois, many images { 'n_roi': 0, 'n_img': 10, 'rng': 671346 }, { 'n_roi': 0, 'n_img': 20, 'rng': 699807 }, # Corner case: comparable numbers of rois and images { 'n_roi': 20, 'n_img': 20, 'rng': 1200238 }, { 'n_roi': 10, 'n_img': 20, 'rng': 1200238 }, { 'n_roi': 5, 'n_img': 5, 'rng': 1200238 }, # ---------------------------------- # Common case: more rois than images { 'n_roi': 100, 'n_img': 1, 'rng': 337156 }, { 'n_roi': 150, 'n_img': 2, 'rng': 275898 }, { 'n_roi': 500, 'n_img': 5, 'rng': 4903221 }, ] for demokw in test_settings: try: n_roi = demokw['n_roi'] n_img = demokw['n_img'] rng = demokw['rng'] print(f'Test refine_boxes case: {demokw!r}') tup = _demodata_refine_boxes(n_roi, n_img, rng=rng) rois, labels, bbox_preds, pos_is_gts, img_metas = tup bboxes_list = self.refine_bboxes(rois, labels, bbox_preds, pos_is_gts, img_metas) assert len(bboxes_list) == n_img assert sum(map(len, bboxes_list)) <= n_roi assert all(b.shape[1] == 4 for b in bboxes_list) except Exception: print(f'Test failed with demokw={demokw!r}') raise def _demodata_refine_boxes(n_roi, n_img, rng=0): """Create random test data for the ``mmdet.models.bbox_heads.bbox_head.BBoxHead.refine_boxes`` method.""" from mmdet.core.bbox.demodata import ensure_rng, random_boxes try: import kwarray except ImportError: pytest.skip('kwarray is required for this test') scale = 512 rng = ensure_rng(rng) img_metas = [{'img_shape': (scale, scale)} for _ in range(n_img)] # Create rois in the expected format roi_boxes = random_boxes(n_roi, scale=scale, rng=rng) if n_img == 0: assert n_roi == 0, 'cannot have any rois if there are no images' img_ids = torch.empty((0, ), dtype=torch.long) roi_boxes = torch.empty((0, 4), dtype=torch.float32) else: img_ids = rng.randint(0, n_img, (n_roi, )) img_ids = torch.from_numpy(img_ids) rois = torch.cat([img_ids[:, None].float(), roi_boxes], dim=1) # Create other args labels = rng.randint(0, 2, (n_roi, )) labels = torch.from_numpy(labels).long() bbox_preds = random_boxes(n_roi, scale=scale, rng=rng) # For each image, pretend random positive boxes are gts is_label_pos = (labels.numpy() > 0).astype(int) lbl_per_img = kwarray.group_items(is_label_pos, img_ids.numpy()) pos_per_img = [sum(lbl_per_img.get(gid, [])) for gid in range(n_img)] # randomly generate with numpy then sort with torch _pos_is_gts = [ rng.randint(0, 2, (npos,
)).astype(np.uint8) for npos in pos_per_img ] pos_is_gts = [ torch.from_numpy(p).sort(descending=True)[0] for p in _pos_is_gts ] return rois, labels, bbox_preds, pos_is_gts, img_metas ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_models/test_roi_heads/test_mask_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from mmdet.models.roi_heads.mask_heads import (DynamicMaskHead, FCNMaskHead, MaskIoUHead) from .utils import _dummy_bbox_sampling def test_mask_head_loss(): """Test mask head loss when mask target is empty.""" self = FCNMaskHead( num_convs=1, roi_feat_size=6, in_channels=8, conv_out_channels=8, num_classes=8) # Dummy proposals proposal_list = [ torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]), ] gt_bboxes = [ torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]), ] gt_labels = [torch.LongTensor([2])] sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes, gt_labels) # create dummy mask import numpy as np from mmdet.core import BitmapMasks dummy_mask = np.random.randint(0, 2, (1, 160, 240), dtype=np.uint8) gt_masks = [BitmapMasks(dummy_mask, 160, 240)] # create dummy train_cfg train_cfg = mmcv.Config(dict(mask_size=12, mask_thr_binary=0.5)) # Create dummy features "extracted" for each sampled bbox num_sampled = sum(len(res.bboxes) for res in sampling_results) dummy_feats = torch.rand(num_sampled, 8, 6, 6) mask_pred = self.forward(dummy_feats) mask_targets = self.get_targets(sampling_results, gt_masks, train_cfg) pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) loss_mask = self.loss(mask_pred, mask_targets, pos_labels) onegt_mask_loss = sum(loss_mask['loss_mask']) assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero' # test mask_iou_head mask_iou_head = MaskIoUHead( num_convs=1, num_fcs=1, roi_feat_size=6, in_channels=8, conv_out_channels=8, fc_out_channels=8, num_classes=8) pos_mask_pred = mask_pred[range(mask_pred.size(0)), pos_labels] mask_iou_pred = mask_iou_head(dummy_feats, pos_mask_pred) pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), pos_labels] mask_iou_targets = mask_iou_head.get_targets(sampling_results, gt_masks, pos_mask_pred, mask_targets, train_cfg) loss_mask_iou = mask_iou_head.loss(pos_mask_iou_pred, mask_iou_targets) onegt_mask_iou_loss = loss_mask_iou['loss_mask_iou'].sum() assert onegt_mask_iou_loss.item() >= 0 # test dynamic_mask_head dummy_proposal_feats = torch.rand(num_sampled, 8) dynamic_mask_head = DynamicMaskHead( dynamic_conv_cfg=dict( type='DynamicConv', in_channels=8, feat_channels=8, out_channels=8, input_feat_shape=6, with_proj=False, act_cfg=dict(type='ReLU', inplace=True), norm_cfg=dict(type='LN')), num_convs=1, num_classes=8, in_channels=8, roi_feat_size=6) mask_pred = dynamic_mask_head(dummy_feats, dummy_proposal_feats) mask_target = dynamic_mask_head.get_targets(sampling_results, gt_masks, train_cfg) loss_mask = dynamic_mask_head.loss(mask_pred, mask_target, pos_labels) loss_mask = loss_mask['loss_mask'].sum() assert loss_mask.item() >= 0 ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_models/test_roi_heads/test_roi_extractor.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
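# Illustrative sketch (not part of the original suite): RoI extractors,
# tested below, consume rois of shape (n, 5) whose first column is the image
# index within the batch, followed by (x1, y1, x2, y2) in pixels; this is the
# packing mmdet's bbox2roi produces. A torch-only sketch of that packing
# (helper name is hypothetical):
def _sketch_pack_rois():
    import torch
    boxes_per_img = [torch.tensor([[10., 20., 110., 220.]]),
                     torch.tensor([[5., 5., 50., 60.], [0., 0., 30., 30.]])]
    rois = torch.cat([torch.cat([b.new_full((b.size(0), 1), i), b], dim=1)
                      for i, b in enumerate(boxes_per_img)])
    assert rois.shape == (3, 5)  # one packed row per box across the batch
    return rois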
import pytest
import torch

from mmdet.models.roi_heads.roi_extractors import GenericRoIExtractor


def test_groie():
    # test with pre/post
    cfg = dict(
        roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32],
        pre_cfg=dict(
            type='ConvModule',
            in_channels=256,
            out_channels=256,
            kernel_size=5,
            padding=2,
            inplace=False,
        ),
        post_cfg=dict(
            type='ConvModule',
            in_channels=256,
            out_channels=256,
            kernel_size=5,
            padding=2,
            inplace=False))

    groie = GenericRoIExtractor(**cfg)

    feats = (
        torch.rand((1, 256, 200, 336)),
        torch.rand((1, 256, 100, 168)),
        torch.rand((1, 256, 50, 84)),
        torch.rand((1, 256, 25, 42)),
    )

    rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])

    res = groie(feats, rois)
    assert res.shape == torch.Size([1, 256, 7, 7])

    # test without pre/post
    cfg = dict(
        roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32])

    groie = GenericRoIExtractor(**cfg)

    feats = (
        torch.rand((1, 256, 200, 336)),
        torch.rand((1, 256, 100, 168)),
        torch.rand((1, 256, 50, 84)),
        torch.rand((1, 256, 25, 42)),
    )

    rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])

    res = groie(feats, rois)
    assert res.shape == torch.Size([1, 256, 7, 7])

    # test without pre/post, concat aggregation
    cfg = dict(
        aggregation='concat',
        roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
        out_channels=256 * 4,
        featmap_strides=[4, 8, 16, 32])

    groie = GenericRoIExtractor(**cfg)

    feats = (
        torch.rand((1, 256, 200, 336)),
        torch.rand((1, 256, 100, 168)),
        torch.rand((1, 256, 50, 84)),
        torch.rand((1, 256, 25, 42)),
    )

    rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])

    res = groie(feats, rois)
    assert res.shape == torch.Size([1, 1024, 7, 7])

    # test unsupported aggregation method
    with pytest.raises(AssertionError):
        cfg = dict(
            aggregation='not support',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=1024,
            featmap_strides=[4, 8, 16, 32])
        _ = GenericRoIExtractor(**cfg)

    # test concat channel numbers
    cfg = dict(
        aggregation='concat',
        roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
        out_channels=256 * 5,  # 256*5 != 256*4
        featmap_strides=[4, 8, 16, 32])

    groie = GenericRoIExtractor(**cfg)

    feats = (
        torch.rand((1, 256, 200, 336)),
        torch.rand((1, 256, 100, 168)),
        torch.rand((1, 256, 50, 84)),
        torch.rand((1, 256, 25, 42)),
    )

    rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])

    # out_channels does not equal the sum of the feature channels
    with pytest.raises(AssertionError):
        _ = groie(feats, rois)


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_roi_heads/test_sabl_bbox_head.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
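
# The SABL head test below follows the suite's standard pattern: run the head
# once with empty ground truth (box losses must be zero) and once with real
# ground truth (box losses must be non-zero). A minimal sketch of the
# empty-ground-truth inputs used throughout these tests:
def _empty_gt_inputs_sketch():
    import torch
    gt_bboxes = [torch.empty((0, 4))]   # zero boxes, one entry per image
    gt_labels = [torch.LongTensor([])]  # matching empty label tensor
    return gt_bboxes, gt_labels
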
import mmcv
import torch

from mmdet.core import bbox2roi
from mmdet.models.roi_heads.bbox_heads import SABLHead
from .utils import _dummy_bbox_sampling


def test_sabl_bbox_head_loss():
    """Tests bbox head loss when truth is empty and non-empty."""
    self = SABLHead(
        num_classes=4,
        cls_in_channels=3,
        reg_in_channels=3,
        cls_out_channels=3,
        reg_offset_out_channels=3,
        reg_cls_out_channels=3,
        roi_feat_size=7)

    # Dummy proposals
    proposal_list = [
        torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
    ]

    target_cfg = mmcv.Config(dict(pos_weight=1))

    # Test bbox loss when truth is empty
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]

    sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
                                            gt_labels)

    bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
                                    target_cfg)
    labels, label_weights, bbox_targets, bbox_weights = bbox_targets

    # Create dummy features "extracted" for each sampled bbox
    num_sampled = sum(len(res.bboxes) for res in sampling_results)
    rois = bbox2roi([res.bboxes for res in sampling_results])
    dummy_feats = torch.rand(num_sampled, 3, 7, 7)
    cls_scores, bbox_preds = self.forward(dummy_feats)

    losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
                       bbox_targets, bbox_weights)
    assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero'
    assert losses.get('loss_bbox_cls',
                      0) == 0, 'empty gt bbox-cls-loss should be zero'
    assert losses.get('loss_bbox_reg',
                      0) == 0, 'empty gt bbox-reg-loss should be zero'

    # Test bbox loss when truth is non-empty
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]

    sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
                                            gt_labels)
    rois = bbox2roi([res.bboxes for res in sampling_results])

    bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
                                    target_cfg)
    labels, label_weights, bbox_targets, bbox_weights = bbox_targets

    # Create dummy features "extracted" for each sampled bbox
    num_sampled = sum(len(res.bboxes) for res in sampling_results)
    dummy_feats = torch.rand(num_sampled, 3, 7, 7)
    cls_scores, bbox_preds = self.forward(dummy_feats)

    losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
                       bbox_targets, bbox_weights)
    assert losses.get('loss_bbox_cls',
                      0) > 0, 'bbox-cls-loss should be non-zero'
    assert losses.get('loss_bbox_reg',
                      0) > 0, 'bbox-reg-loss should be non-zero'


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_roi_heads/utils.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
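
# _dummy_bbox_sampling below wraps the standard assign -> sample pipeline.
# A minimal sketch of that pipeline using the same assigner/sampler configs
# (assumes mmdet is importable; the box values are arbitrary):
def _assign_then_sample_sketch():
    import torch
    from mmdet.core import build_assigner, build_sampler
    assigner = build_assigner(
        dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.5,
            ignore_iof_thr=-1))
    sampler = build_sampler(
        dict(
            type='RandomSampler',
            num=512,
            pos_fraction=0.25,
            neg_pos_ub=-1,
            add_gt_as_proposals=True))
    proposals = torch.Tensor([[10., 10., 60., 60.]])
    gt_bboxes = torch.Tensor([[12., 12., 58., 58.]])
    gt_labels = torch.LongTensor([1])
    assign_result = assigner.assign(proposals, gt_bboxes, None, gt_labels)
    return sampler.sample(
        assign_result, proposals, gt_bboxes, gt_labels,
        feats=torch.rand(1, 1, 3, 3))
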
import torch from mmdet.core import build_assigner, build_sampler def _dummy_bbox_sampling(proposal_list, gt_bboxes, gt_labels): """Create sample results that can be passed to BBoxHead.get_targets.""" num_imgs = 1 feat = torch.rand(1, 1, 3, 3) assign_config = dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, ignore_iof_thr=-1) sampler_config = dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True) bbox_assigner = build_assigner(assign_config) bbox_sampler = build_sampler(sampler_config) gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] for i in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=feat) sampling_results.append(sampling_result) return sampling_results ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_models/test_seg_heads/test_maskformer_fusion_head.py ================================================ import pytest import torch from mmcv import ConfigDict from mmdet.models.seg_heads.panoptic_fusion_heads import MaskFormerFusionHead def test_maskformer_fusion_head(): img_metas = [ { 'batch_input_shape': (128, 160), 'img_shape': (126, 160, 3), 'ori_shape': (63, 80, 3), 'pad_shape': (128, 160, 3) }, ] num_things_classes = 80 num_stuff_classes = 53 num_classes = num_things_classes + num_stuff_classes config = ConfigDict( type='MaskFormerFusionHead', num_things_classes=num_things_classes, num_stuff_classes=num_stuff_classes, loss_panoptic=None, test_cfg=dict( panoptic_on=True, semantic_on=False, instance_on=True, max_per_image=100, object_mask_thr=0.8, iou_thr=0.8, filter_low_score=False), init_cfg=None) self = MaskFormerFusionHead(**config) # test forward_train assert self.forward_train() == dict() mask_cls_results = torch.rand((1, 100, num_classes + 1)) mask_pred_results = torch.rand((1, 100, 128, 160)) # test panoptic_postprocess and instance_postprocess results = self.simple_test(mask_cls_results, mask_pred_results, img_metas) assert 'ins_results' in results[0] and 'pan_results' in results[0] # test semantic_postprocess config.test_cfg.semantic_on = True with pytest.raises(AssertionError): self.simple_test(mask_cls_results, mask_pred_results, img_metas) with pytest.raises(NotImplementedError): self.semantic_postprocess(mask_cls_results, mask_pred_results) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_models/test_utils/test_brick_wrappers.py ================================================ from unittest.mock import patch import torch import torch.nn as nn import torch.nn.functional as F from mmdet.models.utils import AdaptiveAvgPool2d, adaptive_avg_pool2d if torch.__version__ != 'parrots': torch_version = '1.7' else: torch_version = 'parrots' @patch('torch.__version__', torch_version) def test_adaptive_avg_pool2d(): # Test the empty batch dimension # Test the two input conditions x_empty = torch.randn(0, 3, 4, 5) # 1. tuple[int, int] wrapper_out = adaptive_avg_pool2d(x_empty, (2, 2)) assert wrapper_out.shape == (0, 3, 2, 2) # 2. 
int
    wrapper_out = adaptive_avg_pool2d(x_empty, 2)
    assert wrapper_out.shape == (0, 3, 2, 2)

    # wrapper op with a normal (non-empty) 4-dim input
    x_normal = torch.randn(3, 3, 4, 5)
    wrapper_out = adaptive_avg_pool2d(x_normal, (2, 2))
    ref_out = F.adaptive_avg_pool2d(x_normal, (2, 2))
    assert wrapper_out.shape == (3, 3, 2, 2)
    assert torch.equal(wrapper_out, ref_out)

    wrapper_out = adaptive_avg_pool2d(x_normal, 2)
    ref_out = F.adaptive_avg_pool2d(x_normal, 2)
    assert wrapper_out.shape == (3, 3, 2, 2)
    assert torch.equal(wrapper_out, ref_out)


@patch('torch.__version__', torch_version)
def test_AdaptiveAvgPool2d():
    # Test the empty batch dimension
    x_empty = torch.randn(0, 3, 4, 5)
    # Test the four input conditions
    # 1. tuple[int, int]
    wrapper = AdaptiveAvgPool2d((2, 2))
    wrapper_out = wrapper(x_empty)
    assert wrapper_out.shape == (0, 3, 2, 2)

    # 2. int
    wrapper = AdaptiveAvgPool2d(2)
    wrapper_out = wrapper(x_empty)
    assert wrapper_out.shape == (0, 3, 2, 2)

    # 3. tuple[None, int]
    wrapper = AdaptiveAvgPool2d((None, 2))
    wrapper_out = wrapper(x_empty)
    assert wrapper_out.shape == (0, 3, 4, 2)

    # 4. tuple[int, None]
    wrapper = AdaptiveAvgPool2d((2, None))
    wrapper_out = wrapper(x_empty)
    assert wrapper_out.shape == (0, 3, 2, 5)

    # Test the normal batch dimension
    x_normal = torch.randn(3, 3, 4, 5)
    wrapper = AdaptiveAvgPool2d((2, 2))
    ref = nn.AdaptiveAvgPool2d((2, 2))
    wrapper_out = wrapper(x_normal)
    ref_out = ref(x_normal)
    assert wrapper_out.shape == (3, 3, 2, 2)
    assert torch.equal(wrapper_out, ref_out)

    wrapper = AdaptiveAvgPool2d(2)
    ref = nn.AdaptiveAvgPool2d(2)
    wrapper_out = wrapper(x_normal)
    ref_out = ref(x_normal)
    assert wrapper_out.shape == (3, 3, 2, 2)
    assert torch.equal(wrapper_out, ref_out)

    wrapper = AdaptiveAvgPool2d((None, 2))
    ref = nn.AdaptiveAvgPool2d((None, 2))
    wrapper_out = wrapper(x_normal)
    ref_out = ref(x_normal)
    assert wrapper_out.shape == (3, 3, 4, 2)
    assert torch.equal(wrapper_out, ref_out)

    wrapper = AdaptiveAvgPool2d((2, None))
    ref = nn.AdaptiveAvgPool2d((2, None))
    wrapper_out = wrapper(x_normal)
    ref_out = ref(x_normal)
    assert wrapper_out.shape == (3, 3, 2, 5)
    assert torch.equal(wrapper_out, ref_out)


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_utils/test_conv_upsample.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch

from mmdet.models.utils import ConvUpsample


@pytest.mark.parametrize('num_layers', [0, 1, 2])
def test_conv_upsample(num_layers):
    num_upsample = num_layers if num_layers > 0 else 0
    num_layers = num_layers if num_layers > 0 else 1
    layer = ConvUpsample(
        10,
        5,
        num_layers=num_layers,
        num_upsample=num_upsample,
        conv_cfg=None,
        norm_cfg=None)

    size = 5
    x = torch.randn((1, 10, size, size))
    size = size * pow(2, num_upsample)
    x = layer(x)
    assert x.shape[-2:] == (size, size)


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_utils/test_inverted_residual.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
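
# A small sketch of the size arithmetic that test_conv_upsample above checks:
# every upsample step doubles the spatial size, so the output side length is
# in_size * 2 ** num_upsample (assumes mmdet is importable; the channel
# counts mirror the test).
def _conv_upsample_size_sketch():
    import torch
    from mmdet.models.utils import ConvUpsample
    layer = ConvUpsample(
        10, 5, num_layers=2, num_upsample=2, conv_cfg=None, norm_cfg=None)
    out = layer(torch.randn(1, 10, 5, 5))
    assert out.shape[-2:] == (5 * 2**2, 5 * 2**2)  # (20, 20)
    return out
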
import pytest
import torch
from mmcv.cnn import is_norm
from torch.nn.modules import GroupNorm

from mmdet.models.utils import InvertedResidual, SELayer


def test_inverted_residual():
    with pytest.raises(AssertionError):
        # stride must be in [1, 2]
        InvertedResidual(16, 16, 32, stride=3)

    with pytest.raises(AssertionError):
        # se_cfg must be None or dict
        InvertedResidual(16, 16, 32, se_cfg=list())

    with pytest.raises(AssertionError):
        # in_channels and mid_channels must be the same if
        # with_expand_conv is False
        InvertedResidual(16, 16, 32, with_expand_conv=False)

    # Test InvertedResidual forward, stride=1
    block = InvertedResidual(16, 16, 32, stride=1)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert getattr(block, 'se', None) is None
    assert block.with_res_shortcut
    assert x_out.shape == torch.Size((1, 16, 56, 56))

    # Test InvertedResidual forward, stride=2
    block = InvertedResidual(16, 16, 32, stride=2)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert not block.with_res_shortcut
    assert x_out.shape == torch.Size((1, 16, 28, 28))

    # Test InvertedResidual forward with se layer
    se_cfg = dict(channels=32)
    block = InvertedResidual(16, 16, 32, stride=1, se_cfg=se_cfg)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert isinstance(block.se, SELayer)
    assert x_out.shape == torch.Size((1, 16, 56, 56))

    # Test InvertedResidual forward, with_expand_conv=False
    block = InvertedResidual(32, 16, 32, with_expand_conv=False)
    x = torch.randn(1, 32, 56, 56)
    x_out = block(x)
    assert getattr(block, 'expand_conv', None) is None
    assert x_out.shape == torch.Size((1, 16, 56, 56))

    # Test InvertedResidual forward with GroupNorm
    block = InvertedResidual(
        16, 16, 32, norm_cfg=dict(type='GN', num_groups=2))
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    for m in block.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    assert x_out.shape == torch.Size((1, 16, 56, 56))

    # Test InvertedResidual forward with HSigmoid
    block = InvertedResidual(16, 16, 32, act_cfg=dict(type='HSigmoid'))
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size((1, 16, 56, 56))

    # Test InvertedResidual forward with checkpoint
    block = InvertedResidual(16, 16, 32, with_cp=True)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert block.with_cp
    assert x_out.shape == torch.Size((1, 16, 56, 56))


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_models/test_utils/test_model_misc.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
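
# A minimal sketch of the residual-shortcut rule the tests above rely on:
# the identity shortcut is only used when stride == 1 and the input and
# output channel counts match (assumes mmdet is importable).
def _inverted_residual_shortcut_sketch():
    from mmdet.models.utils import InvertedResidual
    assert InvertedResidual(16, 16, 32, stride=1).with_res_shortcut
    assert not InvertedResidual(16, 16, 32, stride=2).with_res_shortcut
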
import numpy as np import torch from torch.autograd import gradcheck from mmdet.models.utils import interpolate_as, sigmoid_geometric_mean def test_interpolate_as(): source = torch.rand((1, 5, 4, 4)) target = torch.rand((1, 1, 16, 16)) # Test 4D source and target result = interpolate_as(source, target) assert result.shape == torch.Size((1, 5, 16, 16)) # Test 3D target result = interpolate_as(source, target.squeeze(0)) assert result.shape == torch.Size((1, 5, 16, 16)) # Test 3D source result = interpolate_as(source.squeeze(0), target) assert result.shape == torch.Size((5, 16, 16)) # Test type(target) == np.ndarray target = np.random.rand(16, 16) result = interpolate_as(source.squeeze(0), target) assert result.shape == torch.Size((5, 16, 16)) def test_sigmoid_geometric_mean(): x = torch.randn(20, 20, dtype=torch.double, requires_grad=True) y = torch.randn(20, 20, dtype=torch.double, requires_grad=True) inputs = (x, y) test = gradcheck(sigmoid_geometric_mean, inputs, eps=1e-6, atol=1e-4) assert test ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_models/test_utils/test_position_encoding.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import pytest import torch from mmdet.models.utils import (LearnedPositionalEncoding, SinePositionalEncoding) def test_sine_positional_encoding(num_feats=16, batch_size=2): # test invalid type of scale with pytest.raises(AssertionError): module = SinePositionalEncoding( num_feats, scale=(3., ), normalize=True) module = SinePositionalEncoding(num_feats) h, w = 10, 6 mask = (torch.rand(batch_size, h, w) > 0.5).to(torch.int) assert not module.normalize out = module(mask) assert out.shape == (batch_size, num_feats * 2, h, w) # set normalize module = SinePositionalEncoding(num_feats, normalize=True) assert module.normalize out = module(mask) assert out.shape == (batch_size, num_feats * 2, h, w) def test_learned_positional_encoding(num_feats=16, row_num_embed=10, col_num_embed=10, batch_size=2): module = LearnedPositionalEncoding(num_feats, row_num_embed, col_num_embed) assert module.row_embed.weight.shape == (row_num_embed, num_feats) assert module.col_embed.weight.shape == (col_num_embed, num_feats) h, w = 10, 6 mask = torch.rand(batch_size, h, w) > 0.5 out = module(mask) assert out.shape == (batch_size, num_feats * 2, h, w) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_models/test_utils/test_se_layer.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
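
# A minimal sketch of the shape contract from test_sine_positional_encoding
# above: for a (B, H, W) mask the encoding is (B, 2 * num_feats, H, W), i.e.
# num_feats sine/cosine channels per spatial axis (assumes mmdet is
# importable).
def _positional_encoding_shape_sketch():
    import torch
    from mmdet.models.utils import SinePositionalEncoding
    module = SinePositionalEncoding(num_feats=16, normalize=True)
    mask = (torch.rand(2, 10, 6) > 0.5).to(torch.int)
    out = module(mask)
    assert out.shape == (2, 32, 10, 6)
    return out
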
import pytest import torch import torch.nn.functional as F from mmcv.cnn import constant_init from mmdet.models.utils import DyReLU, SELayer def test_se_layer(): with pytest.raises(AssertionError): # act_cfg sequence length must equal to 2 SELayer(channels=32, act_cfg=(dict(type='ReLU'), )) with pytest.raises(AssertionError): # act_cfg sequence must be a tuple of dict SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')]) # Test SELayer forward layer = SELayer(channels=32) layer.init_weights() layer.train() x = torch.randn((1, 32, 10, 10)) x_out = layer(x) assert x_out.shape == torch.Size((1, 32, 10, 10)) def test_dyrelu(): with pytest.raises(AssertionError): # act_cfg sequence length must equal to 2 DyReLU(channels=32, act_cfg=(dict(type='ReLU'), )) with pytest.raises(AssertionError): # act_cfg sequence must be a tuple of dict DyReLU(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')]) # Test DyReLU forward layer = DyReLU(channels=32) layer.init_weights() layer.train() x = torch.randn((1, 32, 10, 10)) x_out = layer(x) assert x_out.shape == torch.Size((1, 32, 10, 10)) # DyReLU should act as standard (static) ReLU # when eliminating the effect of SE-like module layer = DyReLU(channels=32) constant_init(layer.conv2.conv, 0) layer.train() x = torch.randn((1, 32, 10, 10)) x_out = layer(x) relu_out = F.relu(x) assert torch.equal(x_out, relu_out) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_models/test_utils/test_transformer.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import pytest import torch from mmcv.utils import ConfigDict from mmdet.models.utils.transformer import (AdaptivePadding, DetrTransformerDecoder, DetrTransformerEncoder, PatchEmbed, PatchMerging, Transformer) def test_adaptive_padding(): for padding in ('same', 'corner'): kernel_size = 16 stride = 16 dilation = 1 input = torch.rand(1, 1, 15, 17) pool = AdaptivePadding( kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding) out = pool(input) # padding to divisible by 16 assert (out.shape[2], out.shape[3]) == (16, 32) input = torch.rand(1, 1, 16, 17) out = pool(input) # padding to divisible by 16 assert (out.shape[2], out.shape[3]) == (16, 32) kernel_size = (2, 2) stride = (2, 2) dilation = (1, 1) adap_pad = AdaptivePadding( kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding) input = torch.rand(1, 1, 11, 13) out = adap_pad(input) # padding to divisible by 2 assert (out.shape[2], out.shape[3]) == (12, 14) kernel_size = (2, 2) stride = (10, 10) dilation = (1, 1) adap_pad = AdaptivePadding( kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding) input = torch.rand(1, 1, 10, 13) out = adap_pad(input) # no padding assert (out.shape[2], out.shape[3]) == (10, 13) kernel_size = (11, 11) adap_pad = AdaptivePadding( kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding) input = torch.rand(1, 1, 11, 13) out = adap_pad(input) # all padding assert (out.shape[2], out.shape[3]) == (21, 21) # test padding as kernel is (7,9) input = torch.rand(1, 1, 11, 13) stride = (3, 4) kernel_size = (4, 5) dilation = (2, 2) # actually (7, 9) adap_pad = AdaptivePadding( kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding) dilation_out = adap_pad(input) assert (dilation_out.shape[2], dilation_out.shape[3]) == (16, 21) kernel_size = (7, 9) dilation = (1, 1) adap_pad = AdaptivePadding( kernel_size=kernel_size, stride=stride, 
dilation=dilation, padding=padding) kernel79_out = adap_pad(input) assert (kernel79_out.shape[2], kernel79_out.shape[3]) == (16, 21) assert kernel79_out.shape == dilation_out.shape # assert only support "same" "corner" with pytest.raises(AssertionError): AdaptivePadding( kernel_size=kernel_size, stride=stride, dilation=dilation, padding=1) def test_patch_embed(): B = 2 H = 3 W = 4 C = 3 embed_dims = 10 kernel_size = 3 stride = 1 dummy_input = torch.rand(B, C, H, W) patch_merge_1 = PatchEmbed( in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=0, dilation=1, norm_cfg=None) x1, shape = patch_merge_1(dummy_input) # test out shape assert x1.shape == (2, 2, 10) # test outsize is correct assert shape == (1, 2) # test L = out_h * out_w assert shape[0] * shape[1] == x1.shape[1] B = 2 H = 10 W = 10 C = 3 embed_dims = 10 kernel_size = 5 stride = 2 dummy_input = torch.rand(B, C, H, W) # test dilation patch_merge_2 = PatchEmbed( in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=0, dilation=2, norm_cfg=None, ) x2, shape = patch_merge_2(dummy_input) # test out shape assert x2.shape == (2, 1, 10) # test outsize is correct assert shape == (1, 1) # test L = out_h * out_w assert shape[0] * shape[1] == x2.shape[1] stride = 2 input_size = (10, 10) dummy_input = torch.rand(B, C, H, W) # test stride and norm patch_merge_3 = PatchEmbed( in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=0, dilation=2, norm_cfg=dict(type='LN'), input_size=input_size) x3, shape = patch_merge_3(dummy_input) # test out shape assert x3.shape == (2, 1, 10) # test outsize is correct assert shape == (1, 1) # test L = out_h * out_w assert shape[0] * shape[1] == x3.shape[1] # test the init_out_size with nn.Unfold assert patch_merge_3.init_out_size[1] == (input_size[0] - 2 * 4 - 1) // 2 + 1 assert patch_merge_3.init_out_size[0] == (input_size[0] - 2 * 4 - 1) // 2 + 1 H = 11 W = 12 input_size = (H, W) dummy_input = torch.rand(B, C, H, W) # test stride and norm patch_merge_3 = PatchEmbed( in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=0, dilation=2, norm_cfg=dict(type='LN'), input_size=input_size) _, shape = patch_merge_3(dummy_input) # when input_size equal to real input # the out_size should be equal to `init_out_size` assert shape == patch_merge_3.init_out_size input_size = (H, W) dummy_input = torch.rand(B, C, H, W) # test stride and norm patch_merge_3 = PatchEmbed( in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=0, dilation=2, norm_cfg=dict(type='LN'), input_size=input_size) _, shape = patch_merge_3(dummy_input) # when input_size equal to real input # the out_size should be equal to `init_out_size` assert shape == patch_merge_3.init_out_size # test adap padding for padding in ('same', 'corner'): in_c = 2 embed_dims = 3 B = 2 # test stride is 1 input_size = (5, 5) kernel_size = (5, 5) stride = (1, 1) dilation = 1 bias = False x = torch.rand(B, in_c, *input_size) patch_embed = PatchEmbed( in_channels=in_c, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) x_out, out_size = patch_embed(x) assert x_out.size() == (B, 25, 3) assert out_size == (5, 5) assert x_out.size(1) == out_size[0] * out_size[1] # test kernel_size == stride input_size = (5, 5) kernel_size = (5, 5) stride = (5, 5) dilation = 1 bias = False x = torch.rand(B, in_c, *input_size) patch_embed = PatchEmbed( in_channels=in_c, 
embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) x_out, out_size = patch_embed(x) assert x_out.size() == (B, 1, 3) assert out_size == (1, 1) assert x_out.size(1) == out_size[0] * out_size[1] # test kernel_size == stride input_size = (6, 5) kernel_size = (5, 5) stride = (5, 5) dilation = 1 bias = False x = torch.rand(B, in_c, *input_size) patch_embed = PatchEmbed( in_channels=in_c, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) x_out, out_size = patch_embed(x) assert x_out.size() == (B, 2, 3) assert out_size == (2, 1) assert x_out.size(1) == out_size[0] * out_size[1] # test different kernel_size with different stride input_size = (6, 5) kernel_size = (6, 2) stride = (6, 2) dilation = 1 bias = False x = torch.rand(B, in_c, *input_size) patch_embed = PatchEmbed( in_channels=in_c, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) x_out, out_size = patch_embed(x) assert x_out.size() == (B, 3, 3) assert out_size == (1, 3) assert x_out.size(1) == out_size[0] * out_size[1] def test_patch_merging(): # Test the model with int padding in_c = 3 out_c = 4 kernel_size = 3 stride = 3 padding = 1 dilation = 1 bias = False # test the case `pad_to_stride` is False patch_merge = PatchMerging( in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) B, L, C = 1, 100, 3 input_size = (10, 10) x = torch.rand(B, L, C) x_out, out_size = patch_merge(x, input_size) assert x_out.size() == (1, 16, 4) assert out_size == (4, 4) # assert out size is consistent with real output assert x_out.size(1) == out_size[0] * out_size[1] in_c = 4 out_c = 5 kernel_size = 6 stride = 3 padding = 2 dilation = 2 bias = False patch_merge = PatchMerging( in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) B, L, C = 1, 100, 4 input_size = (10, 10) x = torch.rand(B, L, C) x_out, out_size = patch_merge(x, input_size) assert x_out.size() == (1, 4, 5) assert out_size == (2, 2) # assert out size is consistent with real output assert x_out.size(1) == out_size[0] * out_size[1] # Test with adaptive padding for padding in ('same', 'corner'): in_c = 2 out_c = 3 B = 2 # test stride is 1 input_size = (5, 5) kernel_size = (5, 5) stride = (1, 1) dilation = 1 bias = False L = input_size[0] * input_size[1] x = torch.rand(B, L, in_c) patch_merge = PatchMerging( in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) x_out, out_size = patch_merge(x, input_size) assert x_out.size() == (B, 25, 3) assert out_size == (5, 5) assert x_out.size(1) == out_size[0] * out_size[1] # test kernel_size == stride input_size = (5, 5) kernel_size = (5, 5) stride = (5, 5) dilation = 1 bias = False L = input_size[0] * input_size[1] x = torch.rand(B, L, in_c) patch_merge = PatchMerging( in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) x_out, out_size = patch_merge(x, input_size) assert x_out.size() == (B, 1, 3) assert out_size == (1, 1) assert x_out.size(1) == out_size[0] * out_size[1] # test kernel_size == stride input_size = (6, 5) kernel_size = (5, 5) stride = (5, 5) dilation = 1 bias = False L = input_size[0] * input_size[1] x = torch.rand(B, L, in_c) patch_merge = PatchMerging( in_channels=in_c, 
out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) x_out, out_size = patch_merge(x, input_size) assert x_out.size() == (B, 2, 3) assert out_size == (2, 1) assert x_out.size(1) == out_size[0] * out_size[1] # test different kernel_size with different stride input_size = (6, 5) kernel_size = (6, 2) stride = (6, 2) dilation = 1 bias = False L = input_size[0] * input_size[1] x = torch.rand(B, L, in_c) patch_merge = PatchMerging( in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) x_out, out_size = patch_merge(x, input_size) assert x_out.size() == (B, 3, 3) assert out_size == (1, 3) assert x_out.size(1) == out_size[0] * out_size[1] def test_detr_transformer_dencoder_encoder_layer(): config = ConfigDict( dict( return_intermediate=True, num_layers=6, transformerlayers=dict( type='DetrTransformerDecoderLayer', attn_cfgs=dict( type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1), feedforward_channels=2048, ffn_dropout=0.1, operation_order=( 'norm', 'self_attn', 'norm', 'cross_attn', 'norm', 'ffn', )))) assert DetrTransformerDecoder(**config).layers[0].pre_norm assert len(DetrTransformerDecoder(**config).layers) == 6 DetrTransformerDecoder(**config) with pytest.raises(AssertionError): config = ConfigDict( dict( return_intermediate=True, num_layers=6, transformerlayers=[ dict( type='DetrTransformerDecoderLayer', attn_cfgs=dict( type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1), feedforward_channels=2048, ffn_dropout=0.1, operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm')) ] * 5)) DetrTransformerDecoder(**config) config = ConfigDict( dict( num_layers=6, transformerlayers=dict( type='DetrTransformerDecoderLayer', attn_cfgs=dict( type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1), feedforward_channels=2048, ffn_dropout=0.1, operation_order=('norm', 'self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm')))) with pytest.raises(AssertionError): # len(operation_order) == 6 DetrTransformerEncoder(**config) def test_transformer(): config = ConfigDict( dict( encoder=dict( type='DetrTransformerEncoder', num_layers=6, transformerlayers=dict( type='BaseTransformerLayer', attn_cfgs=[ dict( type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1) ], feedforward_channels=2048, ffn_dropout=0.1, operation_order=('self_attn', 'norm', 'ffn', 'norm'))), decoder=dict( type='DetrTransformerDecoder', return_intermediate=True, num_layers=6, transformerlayers=dict( type='DetrTransformerDecoderLayer', attn_cfgs=dict( type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1), feedforward_channels=2048, ffn_dropout=0.1, operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm')), ))) transformer = Transformer(**config) transformer.init_weights() ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_onnx/__init__.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from .utils import ort_validate __all__ = ['ort_validate'] ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_onnx/test_head.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
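
# The head tests below all funnel through ort_validate
# (tests/test_onnx/utils.py), which exports the callable with
# torch.onnx.export, runs the same inputs through torch and onnxruntime, and
# asserts the outputs agree within rtol=1e-03 / atol=1e-05. A minimal sketch
# of that pattern on a toy function (the lambda is hypothetical, not part of
# the tests; assumes onnxruntime is installed):
def _ort_validate_sketch():
    import torch
    from .utils import ort_validate
    ort_validate(lambda x: x * 2 + 1, torch.rand(1, 3, 8, 8))
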
import os.path as osp
from functools import partial

import mmcv
import numpy as np
import pytest
import torch
from mmcv.cnn import Scale

from mmdet import digit_version
from mmdet.models import build_detector
from mmdet.models.dense_heads import (FCOSHead, FSAFHead, RetinaHead,
                                      SSDHead, YOLOV3Head)
from .utils import ort_validate

data_path = osp.join(osp.dirname(__file__), 'data')

if digit_version(torch.__version__) <= digit_version('1.5.0'):
    pytest.skip(
        'ort backend does not support version below 1.5.0',
        allow_module_level=True)


def test_cascade_onnx_export():
    config_path = './configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'
    cfg = mmcv.Config.fromfile(config_path)
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    with torch.no_grad():
        model.forward = partial(model.forward, img_metas=[[dict()]])

        dynamic_axes = {
            'input_img': {
                0: 'batch',
                2: 'width',
                3: 'height'
            },
            'dets': {
                0: 'batch',
                1: 'num_dets',
            },
            'labels': {
                0: 'batch',
                1: 'num_dets',
            },
        }

        torch.onnx.export(
            model, [torch.rand(1, 3, 400, 500)],
            'tmp.onnx',
            output_names=['dets', 'labels'],
            input_names=['input_img'],
            keep_initializers_as_inputs=True,
            do_constant_folding=True,
            verbose=False,
            opset_version=11,
            dynamic_axes=dynamic_axes)


def test_faster_onnx_export():
    config_path = './configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
    cfg = mmcv.Config.fromfile(config_path)
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    with torch.no_grad():
        model.forward = partial(model.forward, img_metas=[[dict()]])

        dynamic_axes = {
            'input_img': {
                0: 'batch',
                2: 'width',
                3: 'height'
            },
            'dets': {
                0: 'batch',
                1: 'num_dets',
            },
            'labels': {
                0: 'batch',
                1: 'num_dets',
            },
        }

        torch.onnx.export(
            model, [torch.rand(1, 3, 400, 500)],
            'tmp.onnx',
            output_names=['dets', 'labels'],
            input_names=['input_img'],
            keep_initializers_as_inputs=True,
            do_constant_folding=True,
            verbose=False,
            opset_version=11,
            dynamic_axes=dynamic_axes)


def retinanet_config():
    """RetinaNet Head Config."""
    head_cfg = dict(
        stacked_convs=6,
        feat_channels=2,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]))

    test_cfg = mmcv.Config(
        dict(
            deploy_nms_pre=0,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100))

    model = RetinaHead(
        num_classes=4, in_channels=1, test_cfg=test_cfg, **head_cfg)
    model.requires_grad_(False)
    return model


def test_retina_head_forward_single():
    """Test RetinaNet Head single forward in torch and onnxruntime env."""
    retina_model = retinanet_config()
    feat = torch.rand(1, retina_model.in_channels, 32, 32)
    # validate the result between the torch and ort
    ort_validate(retina_model.forward_single, feat)


def test_retina_head_forward():
    """Test RetinaNet Head forward in torch and onnxruntime env."""
    retina_model = retinanet_config()
    s = 128
    # RetinaNet head expects multiple levels of features per image
    feats = [
        torch.rand(1, retina_model.in_channels, s // (2**(i + 2)),
                   s // (2**(i + 2)))  # [32, 16, 8, 4, 2]
        for i in range(len(retina_model.prior_generator.strides))
    ]
    ort_validate(retina_model.forward, feats)


def test_retinanet_head_onnx_export():
    """Test RetinaNet Head _get_bboxes() in torch and onnxruntime env."""
    retina_model = retinanet_config()
    s = 128
    img_metas = [{
        'img_shape_for_onnx': torch.Tensor([s, s]),
        'scale_factor': np.ones(4),
        'pad_shape': (s, s, 3),
        'img_shape': (s, s, 2)
    }]

    # The data of
retina_head_get_bboxes.pkl contains two parts: # cls_score(list(Tensor)) and bboxes(list(Tensor)), # where each torch.Tensor is generated by torch.rand(). # the cls_score's size: (1, 36, 32, 32), (1, 36, 16, 16), # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2). # the bboxes's size: (1, 36, 32, 32), (1, 36, 16, 16), # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2) retina_head_data = 'retina_head_get_bboxes.pkl' feats = mmcv.load(osp.join(data_path, retina_head_data)) cls_score = feats[:5] bboxes = feats[5:] retina_model.onnx_export = partial( retina_model.onnx_export, img_metas=img_metas, with_nms=False) ort_validate(retina_model.onnx_export, (cls_score, bboxes)) def yolo_config(): """YoloV3 Head Config.""" head_cfg = dict( anchor_generator=dict( type='YOLOAnchorGenerator', base_sizes=[[(116, 90), (156, 198), (373, 326)], [(30, 61), (62, 45), (59, 119)], [(10, 13), (16, 30), (33, 23)]], strides=[32, 16, 8]), bbox_coder=dict(type='YOLOBBoxCoder')) test_cfg = mmcv.Config( dict( deploy_nms_pre=0, min_bbox_size=0, score_thr=0.05, conf_thr=0.005, nms=dict(type='nms', iou_threshold=0.45), max_per_img=100)) model = YOLOV3Head( num_classes=4, in_channels=[1, 1, 1], out_channels=[16, 8, 4], test_cfg=test_cfg, **head_cfg) model.requires_grad_(False) # yolov3 need eval() model.cpu().eval() return model def test_yolov3_head_forward(): """Test Yolov3 head forward() in torch and ort env.""" yolo_model = yolo_config() # Yolov3 head expects a multiple levels of features per image feats = [ torch.rand(1, 1, 64 // (2**(i + 2)), 64 // (2**(i + 2))) for i in range(len(yolo_model.in_channels)) ] ort_validate(yolo_model.forward, feats) def test_yolov3_head_onnx_export(): """Test yolov3 head get_bboxes() in torch and ort env.""" yolo_model = yolo_config() s = 128 img_metas = [{ 'img_shape_for_onnx': torch.Tensor([s, s]), 'img_shape': (s, s, 3), 'scale_factor': np.ones(4), 'pad_shape': (s, s, 3) }] # The data of yolov3_head_get_bboxes.pkl contains # a list of torch.Tensor, where each torch.Tensor # is generated by torch.rand and each tensor size is: # (1, 27, 32, 32), (1, 27, 16, 16), (1, 27, 8, 8). 
yolo_head_data = 'yolov3_head_get_bboxes.pkl'
    pred_maps = mmcv.load(osp.join(data_path, yolo_head_data))

    yolo_model.onnx_export = partial(
        yolo_model.onnx_export, img_metas=img_metas, with_nms=False)
    ort_validate(yolo_model.onnx_export, pred_maps)


def fcos_config():
    """FCOS Head Config."""
    test_cfg = mmcv.Config(
        dict(
            deploy_nms_pre=0,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100))

    model = FCOSHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
    model.requires_grad_(False)
    return model


def test_fcos_head_forward_single():
    """Test fcos forward single in torch and ort env."""
    fcos_model = fcos_config()
    feat = torch.rand(1, fcos_model.in_channels, 32, 32)
    fcos_model.forward_single = partial(
        fcos_model.forward_single,
        scale=Scale(1.0).requires_grad_(False),
        stride=(4, ))
    ort_validate(fcos_model.forward_single, feat)


def test_fcos_head_forward():
    """Test fcos forward in multi-level feature map."""
    fcos_model = fcos_config()
    s = 128
    feats = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    ort_validate(fcos_model.forward, feats)


def test_fcos_head_onnx_export():
    """Test fcos head get_bboxes() in ort."""
    fcos_model = fcos_config()
    s = 128
    img_metas = [{
        'img_shape_for_onnx': torch.Tensor([s, s]),
        'img_shape': (s, s, 3),
        'scale_factor': np.ones(4),
        'pad_shape': (s, s, 3)
    }]
    cls_scores = [
        torch.rand(1, fcos_model.num_classes, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    bboxes = [
        torch.rand(1, 4, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    centerness = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    fcos_model.onnx_export = partial(
        fcos_model.onnx_export, img_metas=img_metas, with_nms=False)
    ort_validate(fcos_model.onnx_export, (cls_scores, bboxes, centerness))


def fsaf_config():
    """FSAF Head Config."""
    cfg = dict(
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=1,
            scales_per_octave=1,
            ratios=[1.0],
            strides=[8, 16, 32, 64, 128]))

    test_cfg = mmcv.Config(
        dict(
            deploy_nms_pre=0,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100))

    model = FSAFHead(num_classes=4, in_channels=1, test_cfg=test_cfg, **cfg)
    model.requires_grad_(False)
    return model


def test_fsaf_head_forward_single():
    """Test FSAF Head forward_single() in torch and onnxruntime env."""
    fsaf_model = fsaf_config()
    feat = torch.rand(1, fsaf_model.in_channels, 32, 32)
    ort_validate(fsaf_model.forward_single, feat)


def test_fsaf_head_forward():
    """Test FSAF Head forward in torch and onnxruntime env."""
    fsaf_model = fsaf_config()
    s = 128
    feats = [
        torch.rand(1, fsaf_model.in_channels, s // (2**(i + 2)),
                   s // (2**(i + 2)))
        for i in range(len(fsaf_model.anchor_generator.strides))
    ]
    ort_validate(fsaf_model.forward, feats)


def test_fsaf_head_onnx_export():
    """Test FSAF Head get_bboxes in torch and onnxruntime env."""
    fsaf_model = fsaf_config()
    s = 256
    img_metas = [{
        'img_shape_for_onnx': torch.Tensor([s, s]),
        'scale_factor': np.ones(4),
        'pad_shape': (s, s, 3),
        'img_shape': (s, s, 2)
    }]

    # The data of fsaf_head_get_bboxes.pkl contains two parts:
    # cls_score(list(Tensor)) and bboxes(list(Tensor)),
    # where each torch.Tensor is generated by torch.rand().
    # the cls_score's size: (1, 4, 64, 64), (1, 4, 32, 32),
    # (1, 4, 16, 16), (1, 4, 8, 8), (1, 4, 4, 4).
    # the bboxes's size: (1, 4, 64, 64), (1, 4, 32, 32),
    # (1, 4, 16, 16), (1, 4, 8, 8), (1, 4, 4, 4).
    fsaf_head_data = 'fsaf_head_get_bboxes.pkl'
    feats = mmcv.load(osp.join(data_path, fsaf_head_data))
    cls_score = feats[:5]
    bboxes = feats[5:]

    fsaf_model.onnx_export = partial(
        fsaf_model.onnx_export, img_metas=img_metas, with_nms=False)
    ort_validate(fsaf_model.onnx_export, (cls_score, bboxes))


def ssd_config():
    """SSD Head Config."""
    cfg = dict(
        anchor_generator=dict(
            type='SSDAnchorGenerator',
            scale_major=False,
            input_size=300,
            basesize_ratio_range=(0.15, 0.9),
            strides=[8, 16, 32, 64, 100, 300],
            ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]))

    test_cfg = mmcv.Config(
        dict(
            deploy_nms_pre=0,
            nms=dict(type='nms', iou_threshold=0.45),
            min_bbox_size=0,
            score_thr=0.02,
            max_per_img=200))

    model = SSDHead(
        num_classes=4,
        in_channels=(4, 8, 4, 2, 2, 2),
        test_cfg=test_cfg,
        **cfg)

    model.requires_grad_(False)
    return model


def test_ssd_head_forward():
    """Test SSD Head forward in torch and onnxruntime env."""
    ssd_model = ssd_config()

    # one feature level per detection scale
    featmap_size = [38, 19, 10, 5, 3, 1]

    feats = [
        torch.rand(1, ssd_model.in_channels[i], featmap_size[i],
                   featmap_size[i]) for i in range(len(ssd_model.in_channels))
    ]
    ort_validate(ssd_model.forward, feats)


def test_ssd_head_onnx_export():
    """Test SSD Head get_bboxes in torch and onnxruntime env."""
    ssd_model = ssd_config()
    s = 300
    img_metas = [{
        'img_shape_for_onnx': torch.Tensor([s, s]),
        'scale_factor': np.ones(4),
        'pad_shape': (s, s, 3),
        'img_shape': (s, s, 2)
    }]

    # The data of ssd_head_get_bboxes.pkl contains two parts:
    # cls_score(list(Tensor)) and bboxes(list(Tensor)),
    # where each torch.Tensor is generated by torch.rand().
    # the cls_score's size: (1, 20, 38, 38), (1, 30, 19, 19),
    # (1, 30, 10, 10), (1, 30, 5, 5), (1, 20, 3, 3), (1, 20, 1, 1).
    # the bboxes's size: (1, 16, 38, 38), (1, 24, 19, 19),
    # (1, 24, 10, 10), (1, 24, 5, 5), (1, 16, 3, 3), (1, 16, 1, 1).
    ssd_head_data = 'ssd_head_get_bboxes.pkl'
    feats = mmcv.load(osp.join(data_path, ssd_head_data))
    cls_score = feats[:6]
    bboxes = feats[6:]

    ssd_model.onnx_export = partial(
        ssd_model.onnx_export, img_metas=img_metas, with_nms=False)
    ort_validate(ssd_model.onnx_export, (cls_score, bboxes))


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_onnx/test_neck.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
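
# The featmap sizes in the SSD tests above are the canonical SSD300 pyramid:
# one feature level per detection scale, paired with the anchor-generator
# strides [8, 16, 32, 64, 100, 300] on a 300x300 input.
def _ssd300_featmap_sketch():
    strides = [8, 16, 32, 64, 100, 300]
    featmap_sizes = [38, 19, 10, 5, 3, 1]
    return list(zip(strides, featmap_sizes))
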
import os.path as osp import mmcv import pytest import torch from mmdet import digit_version from mmdet.models.necks import FPN, YOLOV3Neck from .utils import ort_validate if digit_version(torch.__version__) <= digit_version('1.5.0'): pytest.skip( 'ort backend does not support version below 1.5.0', allow_module_level=True) # Control the returned model of fpn_neck_config() fpn_test_step_names = { 'fpn_normal': 0, 'fpn_wo_extra_convs': 1, 'fpn_lateral_bns': 2, 'fpn_bilinear_upsample': 3, 'fpn_scale_factor': 4, 'fpn_extra_convs_inputs': 5, 'fpn_extra_convs_laterals': 6, 'fpn_extra_convs_outputs': 7, } # Control the returned model of yolo_neck_config() yolo_test_step_names = {'yolo_normal': 0} data_path = osp.join(osp.dirname(__file__), 'data') def fpn_neck_config(test_step_name): """Return the class containing the corresponding attributes according to the fpn_test_step_names.""" s = 64 in_channels = [8, 16, 32, 64] feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8] out_channels = 8 feats = [ torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) for i in range(len(in_channels)) ] if (fpn_test_step_names[test_step_name] == 0): fpn_model = FPN( in_channels=in_channels, out_channels=out_channels, add_extra_convs=True, num_outs=5) elif (fpn_test_step_names[test_step_name] == 1): fpn_model = FPN( in_channels=in_channels, out_channels=out_channels, add_extra_convs=False, num_outs=5) elif (fpn_test_step_names[test_step_name] == 2): fpn_model = FPN( in_channels=in_channels, out_channels=out_channels, add_extra_convs=True, no_norm_on_lateral=False, norm_cfg=dict(type='BN', requires_grad=True), num_outs=5) elif (fpn_test_step_names[test_step_name] == 3): fpn_model = FPN( in_channels=in_channels, out_channels=out_channels, add_extra_convs=True, upsample_cfg=dict(mode='bilinear', align_corners=True), num_outs=5) elif (fpn_test_step_names[test_step_name] == 4): fpn_model = FPN( in_channels=in_channels, out_channels=out_channels, add_extra_convs=True, upsample_cfg=dict(scale_factor=2), num_outs=5) elif (fpn_test_step_names[test_step_name] == 5): fpn_model = FPN( in_channels=in_channels, out_channels=out_channels, add_extra_convs='on_input', num_outs=5) elif (fpn_test_step_names[test_step_name] == 6): fpn_model = FPN( in_channels=in_channels, out_channels=out_channels, add_extra_convs='on_lateral', num_outs=5) elif (fpn_test_step_names[test_step_name] == 7): fpn_model = FPN( in_channels=in_channels, out_channels=out_channels, add_extra_convs='on_output', num_outs=5) return fpn_model, feats def yolo_neck_config(test_step_name): """Config yolov3 Neck.""" in_channels = [16, 8, 4] out_channels = [8, 4, 2] # The data of yolov3_neck.pkl contains a list of # torch.Tensor, where each torch.Tensor is generated by # torch.rand and each tensor size is: # (1, 4, 64, 64), (1, 8, 32, 32), (1, 16, 16, 16). 
yolov3_neck_data = 'yolov3_neck.pkl' feats = mmcv.load(osp.join(data_path, yolov3_neck_data)) if (yolo_test_step_names[test_step_name] == 0): yolo_model = YOLOV3Neck( in_channels=in_channels, out_channels=out_channels, num_scales=3) return yolo_model, feats def test_fpn_normal(): outs = fpn_neck_config('fpn_normal') ort_validate(*outs) def test_fpn_wo_extra_convs(): outs = fpn_neck_config('fpn_wo_extra_convs') ort_validate(*outs) def test_fpn_lateral_bns(): outs = fpn_neck_config('fpn_lateral_bns') ort_validate(*outs) def test_fpn_bilinear_upsample(): outs = fpn_neck_config('fpn_bilinear_upsample') ort_validate(*outs) def test_fpn_scale_factor(): outs = fpn_neck_config('fpn_scale_factor') ort_validate(*outs) def test_fpn_extra_convs_inputs(): outs = fpn_neck_config('fpn_extra_convs_inputs') ort_validate(*outs) def test_fpn_extra_convs_laterals(): outs = fpn_neck_config('fpn_extra_convs_laterals') ort_validate(*outs) def test_fpn_extra_convs_outputs(): outs = fpn_neck_config('fpn_extra_convs_outputs') ort_validate(*outs) def test_yolo_normal(): outs = yolo_neck_config('yolo_normal') ort_validate(*outs) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_onnx/utils.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import os import os.path as osp import warnings import numpy as np import onnx import onnxruntime as ort import torch import torch.nn as nn ort_custom_op_path = '' try: from mmcv.ops import get_onnxruntime_op_path ort_custom_op_path = get_onnxruntime_op_path() except (ImportError, ModuleNotFoundError): warnings.warn('If input model has custom op from mmcv, \ you may have to build mmcv with ONNXRuntime from source.') class WrapFunction(nn.Module): """Wrap the function to be tested for torch.onnx.export tracking.""" def __init__(self, wrapped_function): super(WrapFunction, self).__init__() self.wrapped_function = wrapped_function def forward(self, *args, **kwargs): return self.wrapped_function(*args, **kwargs) def ort_validate(model, feats, onnx_io='tmp.onnx'): """Validate the output of the onnxruntime backend is the same as the output generated by torch. Args: model (nn.Module | function): the function of model or model to be verified. feats (tuple(list(torch.Tensor)) | list(torch.Tensor) | torch.Tensor): the input of model. onnx_io (str): the name of onnx output file. """ # if model is not an instance of nn.Module, then it is a normal # function and it should be wrapped. if isinstance(model, nn.Module): wrap_model = model else: wrap_model = WrapFunction(model) wrap_model.cpu().eval() with torch.no_grad(): torch.onnx.export( wrap_model, feats, onnx_io, export_params=True, keep_initializers_as_inputs=True, do_constant_folding=True, verbose=False, opset_version=11) if isinstance(feats, tuple): ort_feats = [] for feat in feats: ort_feats += feat else: ort_feats = feats # default model name: tmp.onnx onnx_outputs = get_ort_model_output(ort_feats) # remove temp file if osp.exists(onnx_io): os.remove(onnx_io) if isinstance(feats, tuple): torch_outputs = convert_result_list(wrap_model.forward(*feats)) else: torch_outputs = convert_result_list(wrap_model.forward(feats)) torch_outputs = [ torch_output.detach().numpy() for torch_output in torch_outputs ] # match torch_outputs and onnx_outputs for i in range(len(onnx_outputs)): np.testing.assert_allclose( torch_outputs[i], onnx_outputs[i], rtol=1e-03, atol=1e-05) def get_ort_model_output(feat, onnx_io='tmp.onnx'): """Run the model in onnxruntime env. 
    Args:
        feat (list[Tensor]): A list of tensors from torch.rand,
            each is a 4D-tensor.

    Returns:
        list[np.array]: onnxruntime infer result, each is a np.array
    """
    onnx_model = onnx.load(onnx_io)
    onnx.checker.check_model(onnx_model)

    session_options = ort.SessionOptions()
    # register custom op for onnxruntime
    if osp.exists(ort_custom_op_path):
        session_options.register_custom_ops_library(ort_custom_op_path)
    sess = ort.InferenceSession(onnx_io, session_options)
    if isinstance(feat, torch.Tensor):
        onnx_outputs = sess.run(None,
                                {sess.get_inputs()[0].name: feat.numpy()})
    else:
        onnx_outputs = sess.run(None, {
            sess.get_inputs()[i].name: feat[i].numpy()
            for i in range(len(feat))
        })
    return onnx_outputs


def convert_result_list(outputs):
    """Convert the torch forward outputs containing tuple or list to a list
    only containing torch.Tensor.

    Args:
        outputs (list(Tensor) | tuple(list(Tensor) | ...): the outputs in
            torch env, maybe containing nested structures such as list or
            tuple.

    Returns:
        list(Tensor): a list only containing torch.Tensor
    """
    # recursive end condition
    if isinstance(outputs, torch.Tensor):
        return [outputs]

    ret = []
    for sub in outputs:
        ret += convert_result_list(sub)
    return ret


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_runtime/async_benchmark.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.

import asyncio
import os
import shutil
import urllib.request

import mmcv
import torch

from mmdet.apis import (async_inference_detector, inference_detector,
                        init_detector)
from mmdet.utils.contextmanagers import concurrent
from mmdet.utils.profiling import profile_time


async def main():
    """Benchmark between async and synchronous inference interfaces.

    Sample runs for 20 demo images on K80 GPU, model - mask_rcnn_r50_fpn_1x:

    async       sync
    7981.79 ms  9660.82 ms
    8074.52 ms  9660.94 ms
    7976.44 ms  9406.83 ms

    Async variant takes about 0.83-0.85 of the time of the synchronous
    interface.
""" project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) project_dir = os.path.join(project_dir, '..') config_file = os.path.join( project_dir, 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py') checkpoint_file = os.path.join( project_dir, 'checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth') if not os.path.exists(checkpoint_file): url = ('https://download.openmmlab.com/mmdetection/v2.0' '/mask_rcnn/mask_rcnn_r50_fpn_1x_coco' '/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth') print(f'Downloading {url} ...') local_filename, _ = urllib.request.urlretrieve(url) os.makedirs(os.path.dirname(checkpoint_file), exist_ok=True) shutil.move(local_filename, checkpoint_file) print(f'Saved as {checkpoint_file}') else: print(f'Using existing checkpoint {checkpoint_file}') device = 'cuda:0' model = init_detector( config_file, checkpoint=checkpoint_file, device=device) # queue is used for concurrent inference of multiple images streamqueue = asyncio.Queue() # queue size defines concurrency level streamqueue_size = 4 for _ in range(streamqueue_size): streamqueue.put_nowait(torch.cuda.Stream(device=device)) # test a single image and show the results img = mmcv.imread(os.path.join(project_dir, 'demo/demo.jpg')) # warmup await async_inference_detector(model, img) async def detect(img): async with concurrent(streamqueue): return await async_inference_detector(model, img) num_of_images = 20 with profile_time('benchmark', 'async'): tasks = [ asyncio.create_task(detect(img)) for _ in range(num_of_images) ] async_results = await asyncio.gather(*tasks) with torch.cuda.stream(torch.cuda.default_stream()): with profile_time('benchmark', 'sync'): sync_results = [ inference_detector(model, img) for _ in range(num_of_images) ] result_dir = os.path.join(project_dir, 'demo') model.show_result( img, async_results[0], score_thr=0.5, show=False, out_file=os.path.join(result_dir, 'result_async.jpg')) model.show_result( img, sync_results[0], score_thr=0.5, show=False, out_file=os.path.join(result_dir, 'result_sync.jpg')) if __name__ == '__main__': asyncio.run(main()) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_runtime/test_apis.py ================================================ import os from pathlib import Path import pytest from mmdet.apis import init_detector def test_init_detector(): project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) project_dir = os.path.join(project_dir, '..') config_file = os.path.join( project_dir, 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py') # test init_detector with config_file: str and cfg_options cfg_options = dict( model=dict( backbone=dict( depth=18, init_cfg=dict( type='Pretrained', checkpoint='torchvision://resnet18')))) model = init_detector(config_file, device='cpu', cfg_options=cfg_options) # test init_detector with :obj:`Path` config_path_object = Path(config_file) model = init_detector(config_path_object, device='cpu') # test init_detector with undesirable type with pytest.raises(TypeError): config_list = [config_file] model = init_detector(config_list) # noqa: F841 ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_runtime/test_async.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
"""Tests for async interface.""" import asyncio import os import sys import asynctest import mmcv import torch from mmdet.apis import async_inference_detector, init_detector if sys.version_info >= (3, 7): from mmdet.utils.contextmanagers import concurrent class AsyncTestCase(asynctest.TestCase): use_default_loop = False forbid_get_event_loop = True TEST_TIMEOUT = int(os.getenv('ASYNCIO_TEST_TIMEOUT', '30')) def _run_test_method(self, method): result = method() if asyncio.iscoroutine(result): self.loop.run_until_complete( asyncio.wait_for(result, timeout=self.TEST_TIMEOUT)) class MaskRCNNDetector: def __init__(self, model_config, checkpoint=None, streamqueue_size=3, device='cuda:0'): self.streamqueue_size = streamqueue_size self.device = device # build the model and load checkpoint self.model = init_detector( model_config, checkpoint=None, device=self.device) self.streamqueue = None async def init(self): self.streamqueue = asyncio.Queue() for _ in range(self.streamqueue_size): stream = torch.cuda.Stream(device=self.device) self.streamqueue.put_nowait(stream) if sys.version_info >= (3, 7): async def apredict(self, img): if isinstance(img, str): img = mmcv.imread(img) async with concurrent(self.streamqueue): result = await async_inference_detector(self.model, img) return result class AsyncInferenceTestCase(AsyncTestCase): if sys.version_info >= (3, 7): async def test_simple_inference(self): if not torch.cuda.is_available(): import pytest pytest.skip('test requires GPU and torch+cuda') ori_grad_enabled = torch.is_grad_enabled() root_dir = os.path.dirname(os.path.dirname(__name__)) model_config = os.path.join( root_dir, 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py') detector = MaskRCNNDetector(model_config) await detector.init() img_path = os.path.join(root_dir, 'demo/demo.jpg') bboxes, _ = await detector.apredict(img_path) self.assertTrue(bboxes) # asy inference detector will hack grad_enabled, # so restore here to avoid it to influence other tests torch.set_grad_enabled(ori_grad_enabled) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_runtime/test_config.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from os.path import dirname, exists, join from unittest.mock import Mock import pytest from mmdet.core import BitmapMasks, PolygonMasks from mmdet.datasets.builder import DATASETS from mmdet.datasets.utils import NumClassCheckHook def _get_config_directory(): """Find the predefined detector config directory.""" try: # Assume we are running in the source mmdetection repo repo_dpath = dirname(dirname(__file__)) repo_dpath = join(repo_dpath, '..') except NameError: # For IPython development when this __file__ is not defined import mmdet repo_dpath = dirname(dirname(mmdet.__file__)) config_dpath = join(repo_dpath, 'configs') if not exists(config_dpath): raise Exception('Cannot find config path') return config_dpath def _check_numclasscheckhook(detector, config_mod): dummy_runner = Mock() dummy_runner.model = detector def get_dataset_name_classes(dataset): # deal with `RepeatDataset`,`ConcatDataset`,`ClassBalancedDataset`.. 
if isinstance(dataset, (list, tuple)): dataset = dataset[0] while ('dataset' in dataset): dataset = dataset['dataset'] # ConcatDataset if isinstance(dataset, (list, tuple)): dataset = dataset[0] return dataset['type'], dataset.get('classes', None) compatible_check = NumClassCheckHook() dataset_name, CLASSES = get_dataset_name_classes( config_mod['data']['train']) if CLASSES is None: CLASSES = DATASETS.get(dataset_name).CLASSES dummy_runner.data_loader.dataset.CLASSES = CLASSES compatible_check.before_train_epoch(dummy_runner) dummy_runner.data_loader.dataset.CLASSES = None compatible_check.before_train_epoch(dummy_runner) dataset_name, CLASSES = get_dataset_name_classes(config_mod['data']['val']) if CLASSES is None: CLASSES = DATASETS.get(dataset_name).CLASSES dummy_runner.data_loader.dataset.CLASSES = CLASSES compatible_check.before_val_epoch(dummy_runner) dummy_runner.data_loader.dataset.CLASSES = None compatible_check.before_val_epoch(dummy_runner) def _check_roi_head(config, head): # check consistency between head_config and roi_head assert config['type'] == head.__class__.__name__ # check roi_align bbox_roi_cfg = config.bbox_roi_extractor bbox_roi_extractor = head.bbox_roi_extractor _check_roi_extractor(bbox_roi_cfg, bbox_roi_extractor) # check bbox head infos bbox_cfg = config.bbox_head bbox_head = head.bbox_head _check_bbox_head(bbox_cfg, bbox_head) if head.with_mask: # check roi_align if config.mask_roi_extractor: mask_roi_cfg = config.mask_roi_extractor mask_roi_extractor = head.mask_roi_extractor _check_roi_extractor(mask_roi_cfg, mask_roi_extractor, bbox_roi_extractor) # check mask head infos mask_head = head.mask_head mask_cfg = config.mask_head _check_mask_head(mask_cfg, mask_head) # check arch specific settings, e.g., cascade/htc if config['type'] in ['CascadeRoIHead', 'HybridTaskCascadeRoIHead']: assert config.num_stages == len(head.bbox_head) assert config.num_stages == len(head.bbox_roi_extractor) if head.with_mask: assert config.num_stages == len(head.mask_head) assert config.num_stages == len(head.mask_roi_extractor) elif config['type'] in ['MaskScoringRoIHead']: assert (hasattr(head, 'mask_iou_head') and head.mask_iou_head is not None) mask_iou_cfg = config.mask_iou_head mask_iou_head = head.mask_iou_head assert (mask_iou_cfg.fc_out_channels == mask_iou_head.fc_mask_iou.in_features) elif config['type'] in ['GridRoIHead']: grid_roi_cfg = config.grid_roi_extractor grid_roi_extractor = head.grid_roi_extractor _check_roi_extractor(grid_roi_cfg, grid_roi_extractor, bbox_roi_extractor) assert config.grid_head.grid_points == head.grid_head.grid_points def _check_roi_extractor(config, roi_extractor, prev_roi_extractor=None): import torch.nn as nn # Separate roi_extractor and prev_roi_extractor checks for flexibility if isinstance(roi_extractor, nn.ModuleList): roi_extractor = roi_extractor[0] if prev_roi_extractor and isinstance(prev_roi_extractor, nn.ModuleList): prev_roi_extractor = prev_roi_extractor[0] assert (len(config.featmap_strides) == len(roi_extractor.roi_layers)) assert (config.out_channels == roi_extractor.out_channels) from torch.nn.modules.utils import _pair assert (_pair(config.roi_layer.output_size) == roi_extractor.roi_layers[0].output_size) if 'use_torchvision' in config.roi_layer: assert (config.roi_layer.use_torchvision == roi_extractor.roi_layers[0].use_torchvision) elif 'aligned' in config.roi_layer: assert ( config.roi_layer.aligned == roi_extractor.roi_layers[0].aligned) if prev_roi_extractor: assert (roi_extractor.roi_layers[0].aligned ==
prev_roi_extractor.roi_layers[0].aligned) assert (roi_extractor.roi_layers[0].use_torchvision == prev_roi_extractor.roi_layers[0].use_torchvision) def _check_mask_head(mask_cfg, mask_head): import torch.nn as nn if isinstance(mask_cfg, list): for single_mask_cfg, single_mask_head in zip(mask_cfg, mask_head): _check_mask_head(single_mask_cfg, single_mask_head) elif isinstance(mask_head, nn.ModuleList): for single_mask_head in mask_head: _check_mask_head(mask_cfg, single_mask_head) else: assert mask_cfg['type'] == mask_head.__class__.__name__ assert mask_cfg.in_channels == mask_head.in_channels class_agnostic = mask_cfg.get('class_agnostic', False) out_dim = (1 if class_agnostic else mask_cfg.num_classes) if hasattr(mask_head, 'conv_logits'): assert (mask_cfg.conv_out_channels == mask_head.conv_logits.in_channels) assert mask_head.conv_logits.out_channels == out_dim else: assert mask_cfg.fc_out_channels == mask_head.fc_logits.in_features assert (mask_head.fc_logits.out_features == out_dim * mask_head.output_area) def _check_bbox_head(bbox_cfg, bbox_head): import torch.nn as nn if isinstance(bbox_cfg, list): for single_bbox_cfg, single_bbox_head in zip(bbox_cfg, bbox_head): _check_bbox_head(single_bbox_cfg, single_bbox_head) elif isinstance(bbox_head, nn.ModuleList): for single_bbox_head in bbox_head: _check_bbox_head(bbox_cfg, single_bbox_head) else: assert bbox_cfg['type'] == bbox_head.__class__.__name__ if bbox_cfg['type'] == 'SABLHead': assert bbox_cfg.cls_in_channels == bbox_head.cls_in_channels assert bbox_cfg.reg_in_channels == bbox_head.reg_in_channels cls_out_channels = bbox_cfg.get('cls_out_channels', 1024) assert (cls_out_channels == bbox_head.fc_cls.in_features) assert (bbox_cfg.num_classes + 1 == bbox_head.fc_cls.out_features) elif bbox_cfg['type'] == 'DIIHead': assert bbox_cfg['num_ffn_fcs'] == bbox_head.ffn.num_fcs # 3 means FC and LN and Relu assert bbox_cfg['num_cls_fcs'] == len(bbox_head.cls_fcs) // 3 assert bbox_cfg['num_reg_fcs'] == len(bbox_head.reg_fcs) // 3 assert bbox_cfg['in_channels'] == bbox_head.in_channels assert bbox_cfg['in_channels'] == bbox_head.fc_cls.in_features assert bbox_cfg['in_channels'] == bbox_head.fc_reg.in_features assert bbox_cfg['in_channels'] == bbox_head.attention.embed_dims assert bbox_cfg[ 'feedforward_channels'] == bbox_head.ffn.feedforward_channels else: assert bbox_cfg.in_channels == bbox_head.in_channels with_cls = bbox_cfg.get('with_cls', True) if with_cls: fc_out_channels = bbox_cfg.get('fc_out_channels', 2048) assert (fc_out_channels == bbox_head.fc_cls.in_features) if bbox_head.custom_cls_channels: assert (bbox_head.loss_cls.get_cls_channels( bbox_head.num_classes) == bbox_head.fc_cls.out_features ) else: assert (bbox_cfg.num_classes + 1 == bbox_head.fc_cls.out_features) with_reg = bbox_cfg.get('with_reg', True) if with_reg: out_dim = (4 if bbox_cfg.reg_class_agnostic else 4 * bbox_cfg.num_classes) assert bbox_head.fc_reg.out_features == out_dim def _check_anchorhead(config, head): # check consistency between head_config and roi_head assert config['type'] == head.__class__.__name__ assert config.in_channels == head.in_channels num_classes = ( config.num_classes - 1 if config.loss_cls.get('use_sigmoid', False) else config.num_classes) if config['type'] == 'ATSSHead': assert (config.feat_channels == head.atss_cls.in_channels) assert (config.feat_channels == head.atss_reg.in_channels) assert (config.feat_channels == head.atss_centerness.in_channels) elif config['type'] == 'SABLRetinaHead': assert (config.feat_channels == 
head.retina_cls.in_channels) assert (config.feat_channels == head.retina_bbox_reg.in_channels) assert (config.feat_channels == head.retina_bbox_cls.in_channels) else: assert (config.in_channels == head.conv_cls.in_channels) assert (config.in_channels == head.conv_reg.in_channels) assert (head.conv_cls.out_channels == num_classes * head.num_anchors) assert head.conv_reg.out_channels == 4 * head.num_anchors # Only tests a representative subset of configurations # TODO: test pipelines using Albu, current Albu throws None given empty GT @pytest.mark.parametrize( 'config_rpath', [ 'wider_face/ssd300_wider_face.py', 'pascal_voc/ssd300_voc0712.py', 'pascal_voc/ssd512_voc0712.py', # 'albu_example/mask_rcnn_r50_fpn_1x.py', 'foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py', 'mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py', 'mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py', 'mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py' ]) def test_config_data_pipeline(config_rpath): """Test whether the data pipeline is valid and can process corner cases. CommandLine: xdoctest -m tests/test_runtime/ test_config.py test_config_build_data_pipeline """ import numpy as np from mmcv import Config from mmdet.datasets.pipelines import Compose config_dpath = _get_config_directory() print(f'Found config_dpath = {config_dpath}') def dummy_masks(h, w, num_obj=3, mode='bitmap'): assert mode in ('polygon', 'bitmap') if mode == 'bitmap': masks = np.random.randint(0, 2, (num_obj, h, w), dtype=np.uint8) masks = BitmapMasks(masks, h, w) else: masks = [] for i in range(num_obj): masks.append([]) masks[-1].append( np.random.uniform(0, min(h - 1, w - 1), (8 + 4 * i, ))) masks[-1].append( np.random.uniform(0, min(h - 1, w - 1), (10 + 4 * i, ))) masks = PolygonMasks(masks, h, w) return masks config_fpath = join(config_dpath, config_rpath) cfg = Config.fromfile(config_fpath) # remove loading pipeline loading_pipeline = cfg.train_pipeline.pop(0) loading_ann_pipeline = cfg.train_pipeline.pop(0) cfg.test_pipeline.pop(0) train_pipeline = Compose(cfg.train_pipeline) test_pipeline = Compose(cfg.test_pipeline) print(f'Building data pipeline, config_fpath = {config_fpath}') print(f'Test training data pipeline: \n{train_pipeline!r}') img = np.random.randint(0, 255, size=(888, 666, 3), dtype=np.uint8) if loading_pipeline.get('to_float32', False): img = img.astype(np.float32) mode = 'bitmap' if loading_ann_pipeline.get('poly2mask', True) else 'polygon' results = dict( filename='test_img.png', ori_filename='test_img.png', img=img, img_shape=img.shape, ori_shape=img.shape, gt_bboxes=np.array([[35.2, 11.7, 39.7, 15.7]], dtype=np.float32), gt_labels=np.array([1], dtype=np.int64), gt_masks=dummy_masks(img.shape[0], img.shape[1], mode=mode), ) results['img_fields'] = ['img'] results['bbox_fields'] = ['gt_bboxes'] results['mask_fields'] = ['gt_masks'] output_results = train_pipeline(results) assert output_results is not None print(f'Test testing data pipeline: \n{test_pipeline!r}') results = dict( filename='test_img.png', ori_filename='test_img.png', img=img, img_shape=img.shape, ori_shape=img.shape, gt_bboxes=np.array([[35.2, 11.7, 39.7, 15.7]], dtype=np.float32), gt_labels=np.array([1], dtype=np.int64), gt_masks=dummy_masks(img.shape[0], img.shape[1], mode=mode), ) results['img_fields'] = ['img'] results['bbox_fields'] = ['gt_bboxes'] results['mask_fields'] = ['gt_masks'] output_results = test_pipeline(results) assert output_results is not None # test empty GT print('Test empty GT with training data pipeline: '
f'\n{train_pipeline!r}') results = dict( filename='test_img.png', ori_filename='test_img.png', img=img, img_shape=img.shape, ori_shape=img.shape, gt_bboxes=np.zeros((0, 4), dtype=np.float32), gt_labels=np.array([], dtype=np.int64), gt_masks=dummy_masks(img.shape[0], img.shape[1], num_obj=0, mode=mode), ) results['img_fields'] = ['img'] results['bbox_fields'] = ['gt_bboxes'] results['mask_fields'] = ['gt_masks'] output_results = train_pipeline(results) assert output_results is not None print(f'Test empty GT with testing data pipeline: \n{test_pipeline!r}') results = dict( filename='test_img.png', ori_filename='test_img.png', img=img, img_shape=img.shape, ori_shape=img.shape, gt_bboxes=np.zeros((0, 4), dtype=np.float32), gt_labels=np.array([], dtype=np.int64), gt_masks=dummy_masks(img.shape[0], img.shape[1], num_obj=0, mode=mode), ) results['img_fields'] = ['img'] results['bbox_fields'] = ['gt_bboxes'] results['mask_fields'] = ['gt_masks'] output_results = test_pipeline(results) assert output_results is not None ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_runtime/test_eval_hook.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import tempfile import unittest.mock as mock from collections import OrderedDict from unittest.mock import MagicMock, patch import pytest import torch import torch.nn as nn from mmcv.runner import EpochBasedRunner, build_optimizer from mmcv.utils import get_logger from torch.utils.data import DataLoader, Dataset from mmdet.core import DistEvalHook, EvalHook class ExampleDataset(Dataset): def __init__(self): self.index = 0 self.eval_result = [0.1, 0.4, 0.3, 0.7, 0.2, 0.05, 0.4, 0.6] def __getitem__(self, idx): results = dict(imgs=torch.tensor([1])) return results def __len__(self): return 1 @mock.create_autospec def evaluate(self, results, logger=None): pass class EvalDataset(ExampleDataset): def evaluate(self, results, logger=None): mean_ap = self.eval_result[self.index] output = OrderedDict(mAP=mean_ap, index=self.index, score=mean_ap) self.index += 1 return output class ExampleModel(nn.Module): def __init__(self): super().__init__() self.conv = nn.Linear(1, 1) self.test_cfg = None def forward(self, imgs, rescale=False, return_loss=False): return imgs def train_step(self, data_batch, optimizer, **kwargs): outputs = { 'loss': 0.5, 'log_vars': { 'accuracy': 0.98 }, 'num_samples': 1 } return outputs @pytest.mark.skipif( not torch.cuda.is_available(), reason='requires CUDA support') @patch('mmdet.apis.single_gpu_test', MagicMock) @patch('mmdet.apis.multi_gpu_test', MagicMock) @pytest.mark.parametrize('EvalHookCls', (EvalHook, DistEvalHook)) def test_eval_hook(EvalHookCls): with pytest.raises(TypeError): # dataloader must be a pytorch DataLoader test_dataset = ExampleDataset() data_loader = [ DataLoader( test_dataset, batch_size=1, sampler=None, num_worker=0, shuffle=False) ] EvalHookCls(data_loader) with pytest.raises(KeyError): # rule must be in keys of rule_map test_dataset = ExampleDataset() data_loader = DataLoader( test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) EvalHookCls(data_loader, save_best='auto', rule='unsupport') with pytest.raises(ValueError): # key_indicator must be valid when rule_map is None test_dataset = ExampleDataset() data_loader = DataLoader( test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) EvalHookCls(data_loader, save_best='unsupport') optimizer_cfg = dict( type='SGD', lr=0.01, 
momentum=0.9, weight_decay=0.0001) test_dataset = ExampleDataset() loader = DataLoader(test_dataset, batch_size=1) model = ExampleModel() optimizer = build_optimizer(model, optimizer_cfg) data_loader = DataLoader(test_dataset, batch_size=1) eval_hook = EvalHookCls(data_loader, save_best=None) with tempfile.TemporaryDirectory() as tmpdir: logger = get_logger('test_eval') runner = EpochBasedRunner( model=model, batch_processor=None, optimizer=optimizer, work_dir=tmpdir, logger=logger) runner.register_hook(eval_hook) runner.run([loader], [('train', 1)], 1) assert runner.meta is None or 'best_score' not in runner.meta[ 'hook_msgs'] assert runner.meta is None or 'best_ckpt' not in runner.meta[ 'hook_msgs'] # when `save_best` is set to 'auto', first metric will be used. loader = DataLoader(EvalDataset(), batch_size=1) model = ExampleModel() data_loader = DataLoader(EvalDataset(), batch_size=1) eval_hook = EvalHookCls(data_loader, interval=1, save_best='auto') with tempfile.TemporaryDirectory() as tmpdir: logger = get_logger('test_eval') runner = EpochBasedRunner( model=model, batch_processor=None, optimizer=optimizer, work_dir=tmpdir, logger=logger) runner.register_checkpoint_hook(dict(interval=1)) runner.register_hook(eval_hook) runner.run([loader], [('train', 1)], 8) real_path = osp.join(tmpdir, 'best_mAP_epoch_4.pth') assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path) assert runner.meta['hook_msgs']['best_score'] == 0.7 loader = DataLoader(EvalDataset(), batch_size=1) model = ExampleModel() data_loader = DataLoader(EvalDataset(), batch_size=1) eval_hook = EvalHookCls(data_loader, interval=1, save_best='mAP') with tempfile.TemporaryDirectory() as tmpdir: logger = get_logger('test_eval') runner = EpochBasedRunner( model=model, batch_processor=None, optimizer=optimizer, work_dir=tmpdir, logger=logger) runner.register_checkpoint_hook(dict(interval=1)) runner.register_hook(eval_hook) runner.run([loader], [('train', 1)], 8) real_path = osp.join(tmpdir, 'best_mAP_epoch_4.pth') assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path) assert runner.meta['hook_msgs']['best_score'] == 0.7 data_loader = DataLoader(EvalDataset(), batch_size=1) eval_hook = EvalHookCls( data_loader, interval=1, save_best='score', rule='greater') with tempfile.TemporaryDirectory() as tmpdir: logger = get_logger('test_eval') runner = EpochBasedRunner( model=model, batch_processor=None, optimizer=optimizer, work_dir=tmpdir, logger=logger) runner.register_checkpoint_hook(dict(interval=1)) runner.register_hook(eval_hook) runner.run([loader], [('train', 1)], 8) real_path = osp.join(tmpdir, 'best_score_epoch_4.pth') assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path) assert runner.meta['hook_msgs']['best_score'] == 0.7 data_loader = DataLoader(EvalDataset(), batch_size=1) eval_hook = EvalHookCls(data_loader, save_best='mAP', rule='less') with tempfile.TemporaryDirectory() as tmpdir: logger = get_logger('test_eval') runner = EpochBasedRunner( model=model, batch_processor=None, optimizer=optimizer, work_dir=tmpdir, logger=logger) runner.register_checkpoint_hook(dict(interval=1)) runner.register_hook(eval_hook) runner.run([loader], [('train', 1)], 8) real_path = osp.join(tmpdir, 'best_mAP_epoch_6.pth') assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path) assert runner.meta['hook_msgs']['best_score'] == 0.05 data_loader = DataLoader(EvalDataset(), batch_size=1) eval_hook = EvalHookCls(data_loader, save_best='mAP') with tempfile.TemporaryDirectory() as tmpdir: 
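# (Editor's note, added for clarity: the run below stops after 2 epochs, where
# the best mAP seen so far is 0.4 at epoch 2; the resume block that follows it
# reloads latest.pth and continues training to epoch 8, after which the
# tracked best becomes 0.7 from epoch 4.)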
logger = get_logger('test_eval') runner = EpochBasedRunner( model=model, batch_processor=None, optimizer=optimizer, work_dir=tmpdir, logger=logger) runner.register_checkpoint_hook(dict(interval=1)) runner.register_hook(eval_hook) runner.run([loader], [('train', 1)], 2) real_path = osp.join(tmpdir, 'best_mAP_epoch_2.pth') assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path) assert runner.meta['hook_msgs']['best_score'] == 0.4 resume_from = osp.join(tmpdir, 'latest.pth') loader = DataLoader(ExampleDataset(), batch_size=1) eval_hook = EvalHookCls(data_loader, save_best='mAP') runner = EpochBasedRunner( model=model, batch_processor=None, optimizer=optimizer, work_dir=tmpdir, logger=logger) runner.register_checkpoint_hook(dict(interval=1)) runner.register_hook(eval_hook) runner.resume(resume_from) runner.run([loader], [('train', 1)], 8) real_path = osp.join(tmpdir, 'best_mAP_epoch_4.pth') assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path) assert runner.meta['hook_msgs']['best_score'] == 0.7 ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_runtime/test_fp16.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import numpy as np import pytest import torch import torch.nn as nn from mmcv.runner import auto_fp16, force_fp32 from mmcv.runner.fp16_utils import cast_tensor_type def test_cast_tensor_type(): inputs = torch.FloatTensor([5.]) src_type = torch.float32 dst_type = torch.int32 outputs = cast_tensor_type(inputs, src_type, dst_type) assert isinstance(outputs, torch.Tensor) assert outputs.dtype == dst_type inputs = 'tensor' src_type = str dst_type = str outputs = cast_tensor_type(inputs, src_type, dst_type) assert isinstance(outputs, str) inputs = np.array([5.]) src_type = np.ndarray dst_type = np.ndarray outputs = cast_tensor_type(inputs, src_type, dst_type) assert isinstance(outputs, np.ndarray) inputs = dict( tensor_a=torch.FloatTensor([1.]), tensor_b=torch.FloatTensor([2.])) src_type = torch.float32 dst_type = torch.int32 outputs = cast_tensor_type(inputs, src_type, dst_type) assert isinstance(outputs, dict) assert outputs['tensor_a'].dtype == dst_type assert outputs['tensor_b'].dtype == dst_type inputs = [torch.FloatTensor([1.]), torch.FloatTensor([2.])] src_type = torch.float32 dst_type = torch.int32 outputs = cast_tensor_type(inputs, src_type, dst_type) assert isinstance(outputs, list) assert outputs[0].dtype == dst_type assert outputs[1].dtype == dst_type inputs = 5 outputs = cast_tensor_type(inputs, None, None) assert isinstance(outputs, int) def test_auto_fp16(): with pytest.raises(TypeError): # ExampleObject is not a subclass of nn.Module class ExampleObject: @auto_fp16() def __call__(self, x): return x model = ExampleObject() input_x = torch.ones(1, dtype=torch.float32) model(input_x) # apply to all input args class ExampleModule(nn.Module): @auto_fp16() def forward(self, x, y): return x, y model = ExampleModule() input_x = torch.ones(1, dtype=torch.float32) input_y = torch.ones(1, dtype=torch.float32) output_x, output_y = model(input_x, input_y) assert output_x.dtype == torch.float32 assert output_y.dtype == torch.float32 model.fp16_enabled = True output_x, output_y = model(input_x, input_y) assert output_x.dtype == torch.half assert output_y.dtype == torch.half if torch.cuda.is_available(): model.cuda() output_x, output_y = model(input_x.cuda(), input_y.cuda()) assert output_x.dtype == torch.half assert output_y.dtype == torch.half # apply to specified 
input args class ExampleModule(nn.Module): @auto_fp16(apply_to=('x', )) def forward(self, x, y): return x, y model = ExampleModule() input_x = torch.ones(1, dtype=torch.float32) input_y = torch.ones(1, dtype=torch.float32) output_x, output_y = model(input_x, input_y) assert output_x.dtype == torch.float32 assert output_y.dtype == torch.float32 model.fp16_enabled = True output_x, output_y = model(input_x, input_y) assert output_x.dtype == torch.half assert output_y.dtype == torch.float32 if torch.cuda.is_available(): model.cuda() output_x, output_y = model(input_x.cuda(), input_y.cuda()) assert output_x.dtype == torch.half assert output_y.dtype == torch.float32 # apply to optional input args class ExampleModule(nn.Module): @auto_fp16(apply_to=('x', 'y')) def forward(self, x, y=None, z=None): return x, y, z model = ExampleModule() input_x = torch.ones(1, dtype=torch.float32) input_y = torch.ones(1, dtype=torch.float32) input_z = torch.ones(1, dtype=torch.float32) output_x, output_y, output_z = model(input_x, y=input_y, z=input_z) assert output_x.dtype == torch.float32 assert output_y.dtype == torch.float32 assert output_z.dtype == torch.float32 model.fp16_enabled = True output_x, output_y, output_z = model(input_x, y=input_y, z=input_z) assert output_x.dtype == torch.half assert output_y.dtype == torch.half assert output_z.dtype == torch.float32 if torch.cuda.is_available(): model.cuda() output_x, output_y, output_z = model( input_x.cuda(), y=input_y.cuda(), z=input_z.cuda()) assert output_x.dtype == torch.half assert output_y.dtype == torch.half assert output_z.dtype == torch.float32 # out_fp32=True class ExampleModule(nn.Module): @auto_fp16(apply_to=('x', 'y'), out_fp32=True) def forward(self, x, y=None, z=None): return x, y, z model = ExampleModule() input_x = torch.ones(1, dtype=torch.half) input_y = torch.ones(1, dtype=torch.float32) input_z = torch.ones(1, dtype=torch.float32) output_x, output_y, output_z = model(input_x, y=input_y, z=input_z) assert output_x.dtype == torch.half assert output_y.dtype == torch.float32 assert output_z.dtype == torch.float32 model.fp16_enabled = True output_x, output_y, output_z = model(input_x, y=input_y, z=input_z) assert output_x.dtype == torch.float32 assert output_y.dtype == torch.float32 assert output_z.dtype == torch.float32 if torch.cuda.is_available(): model.cuda() output_x, output_y, output_z = model( input_x.cuda(), y=input_y.cuda(), z=input_z.cuda()) assert output_x.dtype == torch.float32 assert output_y.dtype == torch.float32 assert output_z.dtype == torch.float32 def test_force_fp32(): with pytest.raises(TypeError): # ExampleObject is not a subclass of nn.Module class ExampleObject: @force_fp32() def __call__(self, x): return x model = ExampleObject() input_x = torch.ones(1, dtype=torch.float32) model(input_x) # apply to all input args class ExampleModule(nn.Module): @force_fp32() def forward(self, x, y): return x, y model = ExampleModule() input_x = torch.ones(1, dtype=torch.half) input_y = torch.ones(1, dtype=torch.half) output_x, output_y = model(input_x, input_y) assert output_x.dtype == torch.half assert output_y.dtype == torch.half model.fp16_enabled = True output_x, output_y = model(input_x, input_y) assert output_x.dtype == torch.float32 assert output_y.dtype == torch.float32 if torch.cuda.is_available(): model.cuda() output_x, output_y = model(input_x.cuda(), input_y.cuda()) assert output_x.dtype == torch.float32 assert output_y.dtype == torch.float32 # apply to specified input args class ExampleModule(nn.Module): 
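# (Editor's note, added for clarity: force_fp32 mirrors auto_fp16 in reverse.
# Once fp16_enabled is set, the decorated method casts the selected half
# precision arguments back to float32, which is how heads and losses keep
# numerically sensitive computations in fp32 during fp16 training.)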
@force_fp32(apply_to=('x', )) def forward(self, x, y): return x, y model = ExampleModule() input_x = torch.ones(1, dtype=torch.half) input_y = torch.ones(1, dtype=torch.half) output_x, output_y = model(input_x, input_y) assert output_x.dtype == torch.half assert output_y.dtype == torch.half model.fp16_enabled = True output_x, output_y = model(input_x, input_y) assert output_x.dtype == torch.float32 assert output_y.dtype == torch.half if torch.cuda.is_available(): model.cuda() output_x, output_y = model(input_x.cuda(), input_y.cuda()) assert output_x.dtype == torch.float32 assert output_y.dtype == torch.half # apply to optional input args class ExampleModule(nn.Module): @force_fp32(apply_to=('x', 'y')) def forward(self, x, y=None, z=None): return x, y, z model = ExampleModule() input_x = torch.ones(1, dtype=torch.half) input_y = torch.ones(1, dtype=torch.half) input_z = torch.ones(1, dtype=torch.half) output_x, output_y, output_z = model(input_x, y=input_y, z=input_z) assert output_x.dtype == torch.half assert output_y.dtype == torch.half assert output_z.dtype == torch.half model.fp16_enabled = True output_x, output_y, output_z = model(input_x, y=input_y, z=input_z) assert output_x.dtype == torch.float32 assert output_y.dtype == torch.float32 assert output_z.dtype == torch.half if torch.cuda.is_available(): model.cuda() output_x, output_y, output_z = model( input_x.cuda(), y=input_y.cuda(), z=input_z.cuda()) assert output_x.dtype == torch.float32 assert output_y.dtype == torch.float32 assert output_z.dtype == torch.half # out_fp16=True class ExampleModule(nn.Module): @force_fp32(apply_to=('x', 'y'), out_fp16=True) def forward(self, x, y=None, z=None): return x, y, z model = ExampleModule() input_x = torch.ones(1, dtype=torch.float32) input_y = torch.ones(1, dtype=torch.half) input_z = torch.ones(1, dtype=torch.half) output_x, output_y, output_z = model(input_x, y=input_y, z=input_z) assert output_x.dtype == torch.float32 assert output_y.dtype == torch.half assert output_z.dtype == torch.half model.fp16_enabled = True output_x, output_y, output_z = model(input_x, y=input_y, z=input_z) assert output_x.dtype == torch.half assert output_y.dtype == torch.half assert output_z.dtype == torch.half if torch.cuda.is_available(): model.cuda() output_x, output_y, output_z = model( input_x.cuda(), y=input_y.cuda(), z=input_z.cuda()) assert output_x.dtype == torch.half assert output_y.dtype == torch.half assert output_z.dtype == torch.half ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_utils/test_anchor.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
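# --- Editor's sketch (not part of the original repository) -----------------
# Condensing the fp16 tests that end above: mmcv's @auto_fp16 / @force_fp32
# decorators are inert until `fp16_enabled` is set on the module, and
# `apply_to` limits the cast to the named arguments. `_TinyHead` is an
# illustrative module, not an mmdet class.
import torch
import torch.nn as nn
from mmcv.runner import auto_fp16


class _TinyHead(nn.Module):

    @auto_fp16(apply_to=('x', ))
    def forward(self, x, y=None):
        return x, y


_head = _TinyHead()
_x = torch.ones(1, dtype=torch.float32)
_out, _ = _head(_x)
assert _out.dtype == torch.float32  # decorator inactive: fp16_enabled unset
_head.fp16_enabled = True
_out, _ = _head(_x)
assert _out.dtype == torch.half  # now only `x` is cast to half precision
# ---------------------------------------------------------------------------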
""" CommandLine: pytest tests/test_utils/test_anchor.py xdoctest tests/test_utils/test_anchor.py zero """ import pytest import torch def test_standard_points_generator(): from mmdet.core.anchor import build_prior_generator # teat init anchor_generator_cfg = dict( type='MlvlPointGenerator', strides=[4, 8], offset=0) anchor_generator = build_prior_generator(anchor_generator_cfg) assert anchor_generator is not None assert anchor_generator.num_base_priors == [1, 1] # test_stride from mmdet.core.anchor import MlvlPointGenerator # Square strides mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0) mlvl_points_half_stride_generator = MlvlPointGenerator( strides=[4, 10], offset=0.5) assert mlvl_points.num_levels == 2 # assert self.num_levels == len(featmap_sizes) with pytest.raises(AssertionError): mlvl_points.grid_priors(featmap_sizes=[(2, 2)], device='cpu') priors = mlvl_points.grid_priors( featmap_sizes=[(2, 2), (4, 8)], device='cpu') priors_with_stride = mlvl_points.grid_priors( featmap_sizes=[(2, 2), (4, 8)], with_stride=True, device='cpu') assert len(priors) == 2 # assert last dimension is (coord_x, coord_y, stride_w, stride_h). assert priors_with_stride[0].size(1) == 4 assert priors_with_stride[0][0][2] == 4 assert priors_with_stride[0][0][3] == 4 assert priors_with_stride[1][0][2] == 10 assert priors_with_stride[1][0][3] == 10 stride_4_feat_2_2 = priors[0] assert (stride_4_feat_2_2[1] - stride_4_feat_2_2[0]).sum() == 4 assert stride_4_feat_2_2.size(0) == 4 assert stride_4_feat_2_2.size(1) == 2 stride_10_feat_4_8 = priors[1] assert (stride_10_feat_4_8[1] - stride_10_feat_4_8[0]).sum() == 10 assert stride_10_feat_4_8.size(0) == 4 * 8 assert stride_10_feat_4_8.size(1) == 2 # assert the offset of 0.5 * stride priors_half_offset = mlvl_points_half_stride_generator.grid_priors( featmap_sizes=[(2, 2), (4, 8)], device='cpu') assert (priors_half_offset[0][0] - priors[0][0]).sum() == 4 * 0.5 * 2 assert (priors_half_offset[1][0] - priors[1][0]).sum() == 10 * 0.5 * 2 if torch.cuda.is_available(): anchor_generator_cfg = dict( type='MlvlPointGenerator', strides=[4, 8], offset=0) anchor_generator = build_prior_generator(anchor_generator_cfg) assert anchor_generator is not None # Square strides mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0) mlvl_points_half_stride_generator = MlvlPointGenerator( strides=[4, 10], offset=0.5) assert mlvl_points.num_levels == 2 # assert self.num_levels == len(featmap_sizes) with pytest.raises(AssertionError): mlvl_points.grid_priors(featmap_sizes=[(2, 2)], device='cuda') priors = mlvl_points.grid_priors( featmap_sizes=[(2, 2), (4, 8)], device='cuda') priors_with_stride = mlvl_points.grid_priors( featmap_sizes=[(2, 2), (4, 8)], with_stride=True, device='cuda') assert len(priors) == 2 # assert last dimension is (coord_x, coord_y, stride_w, stride_h). 
assert priors_with_stride[0].size(1) == 4 assert priors_with_stride[0][0][2] == 4 assert priors_with_stride[0][0][3] == 4 assert priors_with_stride[1][0][2] == 10 assert priors_with_stride[1][0][3] == 10 stride_4_feat_2_2 = priors[0] assert (stride_4_feat_2_2[1] - stride_4_feat_2_2[0]).sum() == 4 assert stride_4_feat_2_2.size(0) == 4 assert stride_4_feat_2_2.size(1) == 2 stride_10_feat_4_8 = priors[1] assert (stride_10_feat_4_8[1] - stride_10_feat_4_8[0]).sum() == 10 assert stride_10_feat_4_8.size(0) == 4 * 8 assert stride_10_feat_4_8.size(1) == 2 # assert the offset of 0.5 * stride priors_half_offset = mlvl_points_half_stride_generator.grid_priors( featmap_sizes=[(2, 2), (4, 8)], device='cuda') assert (priors_half_offset[0][0] - priors[0][0]).sum() == 4 * 0.5 * 2 assert (priors_half_offset[1][0] - priors[1][0]).sum() == 10 * 0.5 * 2 def test_sparse_prior(): from mmdet.core.anchor import MlvlPointGenerator mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0) prior_indexs = torch.Tensor([0, 2, 4, 5, 6, 9]).long() featmap_sizes = [(3, 5), (6, 4)] grid_anchors = mlvl_points.grid_priors( featmap_sizes=featmap_sizes, with_stride=False, device='cpu') sparse_prior = mlvl_points.sparse_priors( prior_idxs=prior_indexs, featmap_size=featmap_sizes[0], level_idx=0, device='cpu') assert not sparse_prior.is_cuda assert (sparse_prior == grid_anchors[0][prior_indexs]).all() sparse_prior = mlvl_points.sparse_priors( prior_idxs=prior_indexs, featmap_size=featmap_sizes[1], level_idx=1, device='cpu') assert (sparse_prior == grid_anchors[1][prior_indexs]).all() from mmdet.core.anchor import AnchorGenerator mlvl_anchors = AnchorGenerator( strides=[16, 32], ratios=[1.], scales=[1.], base_sizes=[4, 8]) prior_indexs = torch.Tensor([0, 2, 4, 5, 6, 9]).long() featmap_sizes = [(3, 5), (6, 4)] grid_anchors = mlvl_anchors.grid_priors( featmap_sizes=featmap_sizes, device='cpu') sparse_prior = mlvl_anchors.sparse_priors( prior_idxs=prior_indexs, featmap_size=featmap_sizes[0], level_idx=0, device='cpu') assert (sparse_prior == grid_anchors[0][prior_indexs]).all() sparse_prior = mlvl_anchors.sparse_priors( prior_idxs=prior_indexs, featmap_size=featmap_sizes[1], level_idx=1, device='cpu') assert (sparse_prior == grid_anchors[1][prior_indexs]).all() # for ssd from mmdet.core.anchor.anchor_generator import SSDAnchorGenerator featmap_sizes = [(38, 38), (19, 19), (10, 10)] anchor_generator = SSDAnchorGenerator( scale_major=False, input_size=300, basesize_ratio_range=(0.15, 0.9), strides=[8, 16, 32], ratios=[[2], [2, 3], [2, 3]]) ssd_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cpu') for i in range(len(featmap_sizes)): sparse_ssd_anchors = anchor_generator.sparse_priors( prior_idxs=prior_indexs, level_idx=i, featmap_size=featmap_sizes[i], device='cpu') assert (sparse_ssd_anchors == ssd_anchors[i][prior_indexs]).all() # for yolo from mmdet.core.anchor.anchor_generator import YOLOAnchorGenerator featmap_sizes = [(38, 38), (19, 19), (10, 10)] anchor_generator = YOLOAnchorGenerator( strides=[32, 16, 8], base_sizes=[ [(116, 90), (156, 198), (373, 326)], [(30, 61), (62, 45), (59, 119)], [(10, 13), (16, 30), (33, 23)], ]) yolo_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cpu') for i in range(len(featmap_sizes)): sparse_yolo_anchors = anchor_generator.sparse_priors( prior_idxs=prior_indexs, level_idx=i, featmap_size=featmap_sizes[i], device='cpu') assert (sparse_yolo_anchors == yolo_anchors[i][prior_indexs]).all() if torch.cuda.is_available(): mlvl_points = MlvlPointGenerator(strides=[4, 10], 
offset=0) prior_indexs = torch.Tensor([0, 3, 4, 5, 6, 7, 1, 2, 4, 5, 6, 9]).long().cuda() featmap_sizes = [(6, 8), (6, 4)] grid_anchors = mlvl_points.grid_priors( featmap_sizes=featmap_sizes, with_stride=False, device='cuda') sparse_prior = mlvl_points.sparse_priors( prior_idxs=prior_indexs, featmap_size=featmap_sizes[0], level_idx=0, device='cuda') assert (sparse_prior == grid_anchors[0][prior_indexs]).all() sparse_prior = mlvl_points.sparse_priors( prior_idxs=prior_indexs, featmap_size=featmap_sizes[1], level_idx=1, device='cuda') assert (sparse_prior == grid_anchors[1][prior_indexs]).all() assert sparse_prior.is_cuda mlvl_anchors = AnchorGenerator( strides=[16, 32], ratios=[1., 2.5], scales=[1., 5.], base_sizes=[4, 8]) prior_indexs = torch.Tensor([4, 5, 6, 7, 0, 2, 50, 4, 5, 6, 9]).long().cuda() featmap_sizes = [(13, 5), (16, 4)] grid_anchors = mlvl_anchors.grid_priors( featmap_sizes=featmap_sizes, device='cuda') sparse_prior = mlvl_anchors.sparse_priors( prior_idxs=prior_indexs, featmap_size=featmap_sizes[0], level_idx=0, device='cuda') assert (sparse_prior == grid_anchors[0][prior_indexs]).all() sparse_prior = mlvl_anchors.sparse_priors( prior_idxs=prior_indexs, featmap_size=featmap_sizes[1], level_idx=1, device='cuda') assert (sparse_prior == grid_anchors[1][prior_indexs]).all() # for ssd from mmdet.core.anchor.anchor_generator import SSDAnchorGenerator featmap_sizes = [(38, 38), (19, 19), (10, 10)] anchor_generator = SSDAnchorGenerator( scale_major=False, input_size=300, basesize_ratio_range=(0.15, 0.9), strides=[8, 16, 32], ratios=[[2], [2, 3], [2, 3]]) ssd_anchors = anchor_generator.grid_anchors( featmap_sizes, device='cuda') for i in range(len(featmap_sizes)): sparse_ssd_anchors = anchor_generator.sparse_priors( prior_idxs=prior_indexs, level_idx=i, featmap_size=featmap_sizes[i], device='cuda') assert (sparse_ssd_anchors == ssd_anchors[i][prior_indexs]).all() # for yolo from mmdet.core.anchor.anchor_generator import YOLOAnchorGenerator featmap_sizes = [(38, 38), (19, 19), (10, 10)] anchor_generator = YOLOAnchorGenerator( strides=[32, 16, 8], base_sizes=[ [(116, 90), (156, 198), (373, 326)], [(30, 61), (62, 45), (59, 119)], [(10, 13), (16, 30), (33, 23)], ]) yolo_anchors = anchor_generator.grid_anchors( featmap_sizes, device='cuda') for i in range(len(featmap_sizes)): sparse_yolo_anchors = anchor_generator.sparse_priors( prior_idxs=prior_indexs, level_idx=i, featmap_size=featmap_sizes[i], device='cuda') assert (sparse_yolo_anchors == yolo_anchors[i][prior_indexs]).all() def test_standard_anchor_generator(): from mmdet.core.anchor import build_anchor_generator anchor_generator_cfg = dict( type='AnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8]) anchor_generator = build_anchor_generator(anchor_generator_cfg) assert anchor_generator.num_base_priors == \ anchor_generator.num_base_anchors assert anchor_generator.num_base_priors == [3, 3] assert anchor_generator is not None def test_strides(): from mmdet.core import AnchorGenerator # Square strides self = AnchorGenerator([10], [1.], [1.], [10]) anchors = self.grid_anchors([(2, 2)], device='cpu') expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.], [-5., 5., 5., 15.], [5., 5., 15., 15.]]) assert torch.equal(anchors[0], expected_anchors) # Different strides in x and y direction self = AnchorGenerator([(10, 20)], [1.], [1.], [10]) anchors = self.grid_anchors([(2, 2)], device='cpu') expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.], [-5., 15., 5., 25.], [5., 15., 15., 25.]]) 
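# (Editor's note, added for clarity: with a base size of 10 and offset 0 the
# base anchor is centered on each grid point, so for the (10, 20) stride the
# centers sit at x in {0, 10} and y in {0, 20}; each center (cx, cy) becomes
# the box [cx - 5, cy - 5, cx + 5, cy + 5], matching the tensor above.)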
assert torch.equal(anchors[0], expected_anchors) def test_ssd_anchor_generator(): from mmdet.core.anchor import build_anchor_generator if torch.cuda.is_available(): device = 'cuda' else: device = 'cpu' # min_sizes max_sizes must set at the same time with pytest.raises(AssertionError): anchor_generator_cfg = dict( type='SSDAnchorGenerator', scale_major=False, min_sizes=[48, 100, 150, 202, 253, 300], max_sizes=None, strides=[8, 16, 32, 64, 100, 300], ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]) build_anchor_generator(anchor_generator_cfg) # length of min_sizes max_sizes must be the same with pytest.raises(AssertionError): anchor_generator_cfg = dict( type='SSDAnchorGenerator', scale_major=False, min_sizes=[48, 100, 150, 202, 253, 300], max_sizes=[100, 150, 202, 253], strides=[8, 16, 32, 64, 100, 300], ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]) build_anchor_generator(anchor_generator_cfg) # test setting anchor size manually anchor_generator_cfg = dict( type='SSDAnchorGenerator', scale_major=False, min_sizes=[48, 100, 150, 202, 253, 304], max_sizes=[100, 150, 202, 253, 304, 320], strides=[16, 32, 64, 107, 160, 320], ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]]) featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)] anchor_generator = build_anchor_generator(anchor_generator_cfg) expected_base_anchors = [ torch.Tensor([[-16.0000, -16.0000, 32.0000, 32.0000], [-26.6410, -26.6410, 42.6410, 42.6410], [-25.9411, -8.9706, 41.9411, 24.9706], [-8.9706, -25.9411, 24.9706, 41.9411], [-33.5692, -5.8564, 49.5692, 21.8564], [-5.8564, -33.5692, 21.8564, 49.5692]]), torch.Tensor([[-34.0000, -34.0000, 66.0000, 66.0000], [-45.2372, -45.2372, 77.2372, 77.2372], [-54.7107, -19.3553, 86.7107, 51.3553], [-19.3553, -54.7107, 51.3553, 86.7107], [-70.6025, -12.8675, 102.6025, 44.8675], [-12.8675, -70.6025, 44.8675, 102.6025]]), torch.Tensor([[-43.0000, -43.0000, 107.0000, 107.0000], [-55.0345, -55.0345, 119.0345, 119.0345], [-74.0660, -21.0330, 138.0660, 85.0330], [-21.0330, -74.0660, 85.0330, 138.0660], [-97.9038, -11.3013, 161.9038, 75.3013], [-11.3013, -97.9038, 75.3013, 161.9038]]), torch.Tensor([[-47.5000, -47.5000, 154.5000, 154.5000], [-59.5332, -59.5332, 166.5332, 166.5332], [-89.3356, -17.9178, 196.3356, 124.9178], [-17.9178, -89.3356, 124.9178, 196.3356], [-121.4371, -4.8124, 228.4371, 111.8124], [-4.8124, -121.4371, 111.8124, 228.4371]]), torch.Tensor([[-46.5000, -46.5000, 206.5000, 206.5000], [-58.6651, -58.6651, 218.6651, 218.6651], [-98.8980, -9.4490, 258.8980, 169.4490], [-9.4490, -98.8980, 169.4490, 258.8980], [-139.1044, 6.9652, 299.1044, 153.0348], [6.9652, -139.1044, 153.0348, 299.1044]]), torch.Tensor([[8.0000, 8.0000, 312.0000, 312.0000], [4.0513, 4.0513, 315.9487, 315.9487], [-54.9605, 52.5198, 374.9604, 267.4802], [52.5198, -54.9605, 267.4802, 374.9604], [-103.2717, 72.2428, 423.2717, 247.7572], [72.2428, -103.2717, 247.7572, 423.2717]]) ] base_anchors = anchor_generator.base_anchors for i, base_anchor in enumerate(base_anchors): assert base_anchor.allclose(expected_base_anchors[i]) # check valid flags expected_valid_pixels = [2400, 600, 150, 54, 24, 6] multi_level_valid_flags = anchor_generator.valid_flags( featmap_sizes, (320, 320), device) for i, single_level_valid_flag in enumerate(multi_level_valid_flags): assert single_level_valid_flag.sum() == expected_valid_pixels[i] # check number of base anchors for each level assert anchor_generator.num_base_anchors == [6, 6, 6, 6, 6, 6] # check anchor generation anchors = anchor_generator.grid_anchors(featmap_sizes, 
device) assert len(anchors) == 6 # test vgg ssd anchor setting anchor_generator_cfg = dict( type='SSDAnchorGenerator', scale_major=False, input_size=300, basesize_ratio_range=(0.15, 0.9), strides=[8, 16, 32, 64, 100, 300], ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]) featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)] anchor_generator = build_anchor_generator(anchor_generator_cfg) # check base anchors expected_base_anchors = [ torch.Tensor([[-6.5000, -6.5000, 14.5000, 14.5000], [-11.3704, -11.3704, 19.3704, 19.3704], [-10.8492, -3.4246, 18.8492, 11.4246], [-3.4246, -10.8492, 11.4246, 18.8492]]), torch.Tensor([[-14.5000, -14.5000, 30.5000, 30.5000], [-25.3729, -25.3729, 41.3729, 41.3729], [-23.8198, -7.9099, 39.8198, 23.9099], [-7.9099, -23.8198, 23.9099, 39.8198], [-30.9711, -4.9904, 46.9711, 20.9904], [-4.9904, -30.9711, 20.9904, 46.9711]]), torch.Tensor([[-33.5000, -33.5000, 65.5000, 65.5000], [-45.5366, -45.5366, 77.5366, 77.5366], [-54.0036, -19.0018, 86.0036, 51.0018], [-19.0018, -54.0036, 51.0018, 86.0036], [-69.7365, -12.5788, 101.7365, 44.5788], [-12.5788, -69.7365, 44.5788, 101.7365]]), torch.Tensor([[-44.5000, -44.5000, 108.5000, 108.5000], [-56.9817, -56.9817, 120.9817, 120.9817], [-76.1873, -22.0937, 140.1873, 86.0937], [-22.0937, -76.1873, 86.0937, 140.1873], [-100.5019, -12.1673, 164.5019, 76.1673], [-12.1673, -100.5019, 76.1673, 164.5019]]), torch.Tensor([[-53.5000, -53.5000, 153.5000, 153.5000], [-66.2185, -66.2185, 166.2185, 166.2185], [-96.3711, -23.1855, 196.3711, 123.1855], [-23.1855, -96.3711, 123.1855, 196.3711]]), torch.Tensor([[19.5000, 19.5000, 280.5000, 280.5000], [6.6342, 6.6342, 293.3658, 293.3658], [-34.5549, 57.7226, 334.5549, 242.2774], [57.7226, -34.5549, 242.2774, 334.5549]]), ] base_anchors = anchor_generator.base_anchors for i, base_anchor in enumerate(base_anchors): assert base_anchor.allclose(expected_base_anchors[i]) # check valid flags expected_valid_pixels = [5776, 2166, 600, 150, 36, 4] multi_level_valid_flags = anchor_generator.valid_flags( featmap_sizes, (300, 300), device) for i, single_level_valid_flag in enumerate(multi_level_valid_flags): assert single_level_valid_flag.sum() == expected_valid_pixels[i] # check number of base anchors for each level assert anchor_generator.num_base_anchors == [4, 6, 6, 6, 4, 4] # check anchor generation anchors = anchor_generator.grid_anchors(featmap_sizes, device) assert len(anchors) == 6 def test_anchor_generator_with_tuples(): from mmdet.core.anchor import build_anchor_generator if torch.cuda.is_available(): device = 'cuda' else: device = 'cpu' anchor_generator_cfg = dict( type='SSDAnchorGenerator', scale_major=False, input_size=300, basesize_ratio_range=(0.15, 0.9), strides=[8, 16, 32, 64, 100, 300], ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]) featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)] anchor_generator = build_anchor_generator(anchor_generator_cfg) anchors = anchor_generator.grid_anchors(featmap_sizes, device) anchor_generator_cfg_tuples = dict( type='SSDAnchorGenerator', scale_major=False, input_size=300, basesize_ratio_range=(0.15, 0.9), strides=[(8, 8), (16, 16), (32, 32), (64, 64), (100, 100), (300, 300)], ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]) anchor_generator_tuples = build_anchor_generator( anchor_generator_cfg_tuples) anchors_tuples = anchor_generator_tuples.grid_anchors( featmap_sizes, device) for anchor, anchor_tuples in zip(anchors, anchors_tuples): assert torch.equal(anchor, anchor_tuples) def test_yolo_anchor_generator(): from 
mmdet.core.anchor import build_anchor_generator if torch.cuda.is_available(): device = 'cuda' else: device = 'cpu' anchor_generator_cfg = dict( type='YOLOAnchorGenerator', strides=[32, 16, 8], base_sizes=[ [(116, 90), (156, 198), (373, 326)], [(30, 61), (62, 45), (59, 119)], [(10, 13), (16, 30), (33, 23)], ]) featmap_sizes = [(14, 18), (28, 36), (56, 72)] anchor_generator = build_anchor_generator(anchor_generator_cfg) # check base anchors expected_base_anchors = [ torch.Tensor([[-42.0000, -29.0000, 74.0000, 61.0000], [-62.0000, -83.0000, 94.0000, 115.0000], [-170.5000, -147.0000, 202.5000, 179.0000]]), torch.Tensor([[-7.0000, -22.5000, 23.0000, 38.5000], [-23.0000, -14.5000, 39.0000, 30.5000], [-21.5000, -51.5000, 37.5000, 67.5000]]), torch.Tensor([[-1.0000, -2.5000, 9.0000, 10.5000], [-4.0000, -11.0000, 12.0000, 19.0000], [-12.5000, -7.5000, 20.5000, 15.5000]]) ] base_anchors = anchor_generator.base_anchors for i, base_anchor in enumerate(base_anchors): assert base_anchor.allclose(expected_base_anchors[i]) # check number of base anchors for each level assert anchor_generator.num_base_anchors == [3, 3, 3] # check anchor generation anchors = anchor_generator.grid_anchors(featmap_sizes, device) assert len(anchors) == 3 def test_retina_anchor(): from mmdet.models import build_head if torch.cuda.is_available(): device = 'cuda' else: device = 'cpu' # head configs modified from # configs/nas_fpn/retinanet_r50_fpn_crop640_50e.py bbox_head = dict( type='RetinaSepBNHead', num_classes=4, num_ins=5, in_channels=4, stacked_convs=1, feat_channels=4, anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0])) retina_head = build_head(bbox_head) assert retina_head.anchor_generator is not None # use the featmap sizes in NASFPN setting to test retina head featmap_sizes = [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)] # check base anchors expected_base_anchors = [ torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137], [-28.5088, -14.2544, 28.5088, 14.2544], [-35.9188, -17.9594, 35.9188, 17.9594], [-16.0000, -16.0000, 16.0000, 16.0000], [-20.1587, -20.1587, 20.1587, 20.1587], [-25.3984, -25.3984, 25.3984, 25.3984], [-11.3137, -22.6274, 11.3137, 22.6274], [-14.2544, -28.5088, 14.2544, 28.5088], [-17.9594, -35.9188, 17.9594, 35.9188]]), torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274], [-57.0175, -28.5088, 57.0175, 28.5088], [-71.8376, -35.9188, 71.8376, 35.9188], [-32.0000, -32.0000, 32.0000, 32.0000], [-40.3175, -40.3175, 40.3175, 40.3175], [-50.7968, -50.7968, 50.7968, 50.7968], [-22.6274, -45.2548, 22.6274, 45.2548], [-28.5088, -57.0175, 28.5088, 57.0175], [-35.9188, -71.8376, 35.9188, 71.8376]]), torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548], [-114.0350, -57.0175, 114.0350, 57.0175], [-143.6751, -71.8376, 143.6751, 71.8376], [-64.0000, -64.0000, 64.0000, 64.0000], [-80.6349, -80.6349, 80.6349, 80.6349], [-101.5937, -101.5937, 101.5937, 101.5937], [-45.2548, -90.5097, 45.2548, 90.5097], [-57.0175, -114.0350, 57.0175, 114.0350], [-71.8376, -143.6751, 71.8376, 143.6751]]), torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097], [-228.0701, -114.0350, 228.0701, 114.0350], [-287.3503, -143.6751, 287.3503, 143.6751], [-128.0000, -128.0000, 128.0000, 128.0000], [-161.2699, -161.2699, 161.2699, 161.2699], [-203.1873, -203.1873, 203.1873, 203.1873], [-90.5097, -181.0193, 90.5097, 181.0193], [-114.0350, 
-228.0701, 114.0350, 228.0701], [-143.6751, -287.3503, 143.6751, 287.3503]]), torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193], [-456.1401, -228.0701, 456.1401, 228.0701], [-574.7006, -287.3503, 574.7006, 287.3503], [-256.0000, -256.0000, 256.0000, 256.0000], [-322.5398, -322.5398, 322.5398, 322.5398], [-406.3747, -406.3747, 406.3747, 406.3747], [-181.0193, -362.0387, 181.0193, 362.0387], [-228.0701, -456.1401, 228.0701, 456.1401], [-287.3503, -574.7006, 287.3503, 574.7006]]) ] base_anchors = retina_head.anchor_generator.base_anchors for i, base_anchor in enumerate(base_anchors): assert base_anchor.allclose(expected_base_anchors[i]) # check valid flags expected_valid_pixels = [57600, 14400, 3600, 900, 225] multi_level_valid_flags = retina_head.anchor_generator.valid_flags( featmap_sizes, (640, 640), device) for i, single_level_valid_flag in enumerate(multi_level_valid_flags): assert single_level_valid_flag.sum() == expected_valid_pixels[i] # check number of base anchors for each level assert retina_head.anchor_generator.num_base_anchors == [9, 9, 9, 9, 9] # check anchor generation anchors = retina_head.anchor_generator.grid_anchors(featmap_sizes, device) assert len(anchors) == 5 def test_guided_anchor(): from mmdet.models import build_head if torch.cuda.is_available(): device = 'cuda' else: device = 'cpu' # head configs modified from # configs/guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py bbox_head = dict( type='GARetinaHead', num_classes=8, in_channels=4, stacked_convs=1, feat_channels=4, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128])) ga_retina_head = build_head(bbox_head) assert ga_retina_head.approx_anchor_generator is not None # use the featmap sizes in NASFPN setting to test ga_retina_head featmap_sizes = [(100, 152), (50, 76), (25, 38), (13, 19), (7, 10)] # check base anchors expected_approxs = [ torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137], [-28.5088, -14.2544, 28.5088, 14.2544], [-35.9188, -17.9594, 35.9188, 17.9594], [-16.0000, -16.0000, 16.0000, 16.0000], [-20.1587, -20.1587, 20.1587, 20.1587], [-25.3984, -25.3984, 25.3984, 25.3984], [-11.3137, -22.6274, 11.3137, 22.6274], [-14.2544, -28.5088, 14.2544, 28.5088], [-17.9594, -35.9188, 17.9594, 35.9188]]), torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274], [-57.0175, -28.5088, 57.0175, 28.5088], [-71.8376, -35.9188, 71.8376, 35.9188], [-32.0000, -32.0000, 32.0000, 32.0000], [-40.3175, -40.3175, 40.3175, 40.3175], [-50.7968, -50.7968, 50.7968, 50.7968], [-22.6274, -45.2548, 22.6274, 45.2548], [-28.5088, -57.0175, 28.5088, 57.0175], [-35.9188, -71.8376, 35.9188, 71.8376]]), torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548], [-114.0350, -57.0175, 114.0350, 57.0175], [-143.6751, -71.8376, 143.6751, 71.8376], [-64.0000, -64.0000, 64.0000, 64.0000], [-80.6349, -80.6349, 80.6349, 80.6349], [-101.5937, -101.5937, 101.5937, 101.5937], [-45.2548, -90.5097, 45.2548, 90.5097], [-57.0175, -114.0350, 57.0175, 114.0350], [-71.8376, -143.6751, 71.8376, 143.6751]]), torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097], [-228.0701, -114.0350, 228.0701, 114.0350], [-287.3503, -143.6751, 287.3503, 143.6751], [-128.0000, -128.0000, 128.0000, 128.0000], [-161.2699, -161.2699, 161.2699, 161.2699], [-203.1873, -203.1873, 203.1873, 203.1873], [-90.5097, -181.0193, 90.5097, 181.0193], [-114.0350, -228.0701, 114.0350, 
228.0701], [-143.6751, -287.3503, 143.6751, 287.3503]]), torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193], [-456.1401, -228.0701, 456.1401, 228.0701], [-574.7006, -287.3503, 574.7006, 287.3503], [-256.0000, -256.0000, 256.0000, 256.0000], [-322.5398, -322.5398, 322.5398, 322.5398], [-406.3747, -406.3747, 406.3747, 406.3747], [-181.0193, -362.0387, 181.0193, 362.0387], [-228.0701, -456.1401, 228.0701, 456.1401], [-287.3503, -574.7006, 287.3503, 574.7006]]) ] approxs = ga_retina_head.approx_anchor_generator.base_anchors for i, base_anchor in enumerate(approxs): assert base_anchor.allclose(expected_approxs[i]) # check valid flags expected_valid_pixels = [136800, 34200, 8550, 2223, 630] multi_level_valid_flags = ga_retina_head.approx_anchor_generator \ .valid_flags(featmap_sizes, (800, 1216), device) for i, single_level_valid_flag in enumerate(multi_level_valid_flags): assert single_level_valid_flag.sum() == expected_valid_pixels[i] # check number of base anchors for each level assert ga_retina_head.approx_anchor_generator.num_base_anchors == [ 9, 9, 9, 9, 9 ] # check approx generation squares = ga_retina_head.square_anchor_generator.grid_anchors( featmap_sizes, device) assert len(squares) == 5 expected_squares = [ torch.Tensor([[-16., -16., 16., 16.]]), torch.Tensor([[-32., -32., 32., 32]]), torch.Tensor([[-64., -64., 64., 64.]]), torch.Tensor([[-128., -128., 128., 128.]]), torch.Tensor([[-256., -256., 256., 256.]]) ] squares = ga_retina_head.square_anchor_generator.base_anchors for i, base_anchor in enumerate(squares): assert base_anchor.allclose(expected_squares[i]) # square_anchor_generator does not check valid flags # check number of base anchors for each level assert (ga_retina_head.square_anchor_generator.num_base_anchors == [ 1, 1, 1, 1, 1 ]) # check square generation anchors = ga_retina_head.square_anchor_generator.grid_anchors( featmap_sizes, device) assert len(anchors) == 5 ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_utils/test_assigner.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. """Tests the Assigner objects. 

CommandLine:
    pytest tests/test_utils/test_assigner.py
    xdoctest tests/test_utils/test_assigner.py zero
"""
import pytest
import torch

from mmdet.core.bbox.assigners import (ApproxMaxIoUAssigner,
                                       CenterRegionAssigner,
                                       HungarianAssigner,
                                       MaskHungarianAssigner, MaxIoUAssigner,
                                       PointAssigner, SimOTAAssigner,
                                       TaskAlignedAssigner, UniformAssigner)


def test_max_iou_assigner():
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([2, 3])
    assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 4
    assert len(assign_result.labels) == 4

    expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)


def test_max_iou_assigner_with_ignore():
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [30, 32, 40, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_bboxes_ignore = torch.Tensor([
        [30, 30, 40, 40],
    ])
    assign_result = self.assign(
        bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore)

    expected_gt_inds = torch.LongTensor([1, 0, 2, -1])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)


def test_max_iou_assigner_with_empty_gt():
    """Test corner case where an image might have no true detections."""
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.empty(0, 4)
    assign_result = self.assign(bboxes, gt_bboxes)

    expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)


def test_max_iou_assigner_with_empty_boxes():
    """Test corner case where a network might predict no boxes."""
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([2, 3])

    # Test with gt_labels
    assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 0
    assert tuple(assign_result.labels.shape) == (0, )

    # Test without gt_labels
    assign_result = self.assign(bboxes, gt_bboxes, gt_labels=None)
    assert len(assign_result.gt_inds) == 0
    assert assign_result.labels is None


def test_max_iou_assigner_with_empty_boxes_and_ignore():
    """Test corner case where a network might predict no boxes and
    ignore_iof_thr is on."""
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
    )
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_bboxes_ignore = torch.Tensor([
        [30, 30, 40, 40],
    ])
    gt_labels = torch.LongTensor([2, 3])

    # Test with gt_labels
    assign_result = self.assign(
        bboxes,
        gt_bboxes,
        gt_labels=gt_labels,
        gt_bboxes_ignore=gt_bboxes_ignore)
    assert len(assign_result.gt_inds) == 0
    assert tuple(assign_result.labels.shape) == (0, )

    # Test without gt_labels
    assign_result = self.assign(
        bboxes, gt_bboxes, gt_labels=None, gt_bboxes_ignore=gt_bboxes_ignore)
    assert len(assign_result.gt_inds) == 0
    assert assign_result.labels is None


def test_max_iou_assigner_with_empty_boxes_and_gt():
    """Test corner case where a network might predict no boxes and no gt."""
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.empty((0, 4))
    assign_result = self.assign(bboxes, gt_bboxes)
    assert len(assign_result.gt_inds) == 0


def test_point_assigner():
    self = PointAssigner()
    points = torch.FloatTensor([  # [x, y, stride]
        [0, 0, 1],
        [10, 10, 1],
        [5, 5, 1],
        [32, 32, 1],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    assign_result = self.assign(points, gt_bboxes)
    expected_gt_inds = torch.LongTensor([1, 2, 1, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)


def test_point_assigner_with_empty_gt():
    """Test corner case where an image might have no true detections."""
    self = PointAssigner()
    points = torch.FloatTensor([  # [x, y, stride]
        [0, 0, 1],
        [10, 10, 1],
        [5, 5, 1],
        [32, 32, 1],
    ])
    gt_bboxes = torch.FloatTensor([])
    assign_result = self.assign(points, gt_bboxes)

    expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)


def test_point_assigner_with_empty_boxes_and_gt():
    """Test corner case where an image might predict no points and no gt."""
    self = PointAssigner()
    points = torch.FloatTensor([])
    gt_bboxes = torch.FloatTensor([])
    assign_result = self.assign(points, gt_bboxes)
    assert len(assign_result.gt_inds) == 0


def test_approx_iou_assigner():
    self = ApproxMaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    approxs_per_octave = 1
    approxs = bboxes
    squares = bboxes
    assign_result = self.assign(approxs, squares, approxs_per_octave,
                                gt_bboxes)

    expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)


def test_approx_iou_assigner_with_empty_gt():
    """Test corner case where an image might have no true detections."""
    self = ApproxMaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([])
    approxs_per_octave = 1
    approxs = bboxes
    squares = bboxes
    assign_result = self.assign(approxs, squares, approxs_per_octave,
                                gt_bboxes)

    expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)


def test_approx_iou_assigner_with_empty_boxes():
    """Test corner case where a network might predict no boxes."""
    self = ApproxMaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    approxs_per_octave = 1
    approxs = bboxes
    squares = bboxes
    assign_result = self.assign(approxs, squares, approxs_per_octave,
                                gt_bboxes)
    assert len(assign_result.gt_inds) == 0


def test_approx_iou_assigner_with_empty_boxes_and_gt():
    """Test corner case where a network might predict no boxes and no gt."""
    self = ApproxMaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.empty((0, 4))
    approxs_per_octave = 1
    approxs = bboxes
    squares = bboxes
    assign_result = self.assign(approxs, squares, approxs_per_octave,
                                gt_bboxes)
    assert len(assign_result.gt_inds) == 0
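

# --- Illustrative sketch (not part of the upstream suite) ---
# A minimal, self-contained example of the assign() contract the tests above
# exercise, assuming the same MaxIoUAssigner API: gt_inds is 1-based, with
# 0 meaning negative and -1 meaning ignored, and labels mirror gt_labels for
# positive samples.
def _example_max_iou_assign():
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.FloatTensor([[0, 0, 10, 10], [20, 20, 30, 30]])
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 10]])
    gt_labels = torch.LongTensor([7])
    result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    # box 0 matches gt 1 exactly -> gt_inds[0] == 1 and labels[0] == 7;
    # box 1 overlaps nothing     -> gt_inds[1] == 0 (negative)
    return result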


def test_random_assign_result():
    """Test random instantiation of assign result to catch corner cases."""
    from mmdet.core.bbox.assigners.assign_result import AssignResult
    AssignResult.random()

    AssignResult.random(num_gts=0, num_preds=0)
    AssignResult.random(num_gts=0, num_preds=3)
    AssignResult.random(num_gts=3, num_preds=3)
    AssignResult.random(num_gts=0, num_preds=3)
    AssignResult.random(num_gts=7, num_preds=7)
    AssignResult.random(num_gts=7, num_preds=64)
    AssignResult.random(num_gts=24, num_preds=3)


def test_center_region_assigner():
    self = CenterRegionAssigner(pos_scale=0.3, neg_scale=1)
    bboxes = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20],
                                [8, 8, 9, 9]])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 11, 11],  # match bboxes[0]
        [10, 10, 20, 20],  # match bboxes[1]
        [4.5, 4.5, 5.5, 5.5],  # match bboxes[0] but area is too small
        [0, 0, 10, 10],  # match bboxes[1] and has a smaller area than gt[0]
    ])
    gt_labels = torch.LongTensor([2, 3, 4, 5])
    assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 3
    assert len(assign_result.labels) == 3
    expected_gt_inds = torch.LongTensor([4, 2, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)
    shadowed_labels = assign_result.get_extra_property('shadowed_labels')
    # [8, 8, 9, 9] is in the shadowed region of [0, 0, 11, 11] (label: 2)
    assert torch.any(shadowed_labels == torch.LongTensor([[2, 2]]))
    # [8, 8, 9, 9] is in the shadowed region of [0, 0, 10, 10] (label: 5)
    assert torch.any(shadowed_labels == torch.LongTensor([[2, 5]]))
    # [0, 0, 10, 10] is already assigned to [4.5, 4.5, 5.5, 5.5].
    # Therefore, [0, 0, 11, 11] (label: 2) is shadowed
    assert torch.any(shadowed_labels == torch.LongTensor([[0, 2]]))


def test_center_region_assigner_with_ignore():
    self = CenterRegionAssigner(
        pos_scale=0.5,
        neg_scale=1,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 10],  # match bboxes[0]
        [10, 10, 20, 20],  # match bboxes[1]
    ])
    gt_bboxes_ignore = torch.FloatTensor([
        [0, 0, 10, 10],  # match bboxes[0]
    ])
    gt_labels = torch.LongTensor([1, 2])
    assign_result = self.assign(
        bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 2
    assert len(assign_result.labels) == 2

    expected_gt_inds = torch.LongTensor([-1, 2])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)


def test_center_region_assigner_with_empty_bboxes():
    self = CenterRegionAssigner(
        pos_scale=0.5,
        neg_scale=1,
    )
    bboxes = torch.empty((0, 4)).float()
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 10],  # match bboxes[0]
        [10, 10, 20, 20],  # match bboxes[1]
    ])
    gt_labels = torch.LongTensor([1, 2])
    assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert assign_result.gt_inds is None or assign_result.gt_inds.numel() == 0
    assert assign_result.labels is None or assign_result.labels.numel() == 0


def test_center_region_assigner_with_empty_gts():
    self = CenterRegionAssigner(
        pos_scale=0.5,
        neg_scale=1,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
    ])
    gt_bboxes = torch.empty((0, 4)).float()
    gt_labels = torch.empty((0, )).long()
    assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 2
    expected_gt_inds = torch.LongTensor([0, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)


def test_hungarian_match_assigner():
    self = HungarianAssigner()
    assert self.iou_cost.iou_mode == 'giou'

    # test no gt bboxes
    bbox_pred = torch.rand((10, 4))
    cls_pred = torch.rand((10, 81))
    gt_bboxes = torch.empty((0, 4)).float()
    gt_labels = torch.empty((0, )).long()
    img_meta = dict(img_shape=(10, 8, 3))
    assign_result = self.assign(bbox_pred, cls_pred, gt_bboxes, gt_labels,
                                img_meta)
    assert torch.all(assign_result.gt_inds == 0)
    assert torch.all(assign_result.labels == -1)

    # test with gt bboxes
    gt_bboxes = torch.FloatTensor([[0, 0, 5, 7], [3, 5, 7, 8]])
    gt_labels = torch.LongTensor([1, 20])
    assign_result = self.assign(bbox_pred, cls_pred, gt_bboxes, gt_labels,
                                img_meta)
    assert torch.all(assign_result.gt_inds > -1)
    assert (assign_result.gt_inds > 0).sum() == gt_bboxes.size(0)
    assert (assign_result.labels > -1).sum() == gt_bboxes.size(0)

    # test iou mode
    self = HungarianAssigner(
        iou_cost=dict(type='IoUCost', iou_mode='iou', weight=1.0))
    assert self.iou_cost.iou_mode == 'iou'
    assign_result = self.assign(bbox_pred, cls_pred, gt_bboxes, gt_labels,
                                img_meta)
    assert torch.all(assign_result.gt_inds > -1)
    assert (assign_result.gt_inds > 0).sum() == gt_bboxes.size(0)
    assert (assign_result.labels > -1).sum() == gt_bboxes.size(0)

    # test focal loss mode
    self = HungarianAssigner(
        iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0),
        cls_cost=dict(type='FocalLossCost', weight=1.))
    assert self.iou_cost.iou_mode == 'giou'
    assign_result = self.assign(bbox_pred, cls_pred, gt_bboxes, gt_labels,
                                img_meta)
    assert torch.all(assign_result.gt_inds > -1)
    assert (assign_result.gt_inds > 0).sum() == gt_bboxes.size(0)
    assert (assign_result.labels > -1).sum() == gt_bboxes.size(0)


def test_uniform_assigner():
    self = UniformAssigner(0.15, 0.7, 1)
    pred_bbox = torch.FloatTensor([
        [1, 1, 12, 8],
        [4, 4, 20, 20],
        [1, 5, 15, 15],
        [30, 5, 32, 42],
    ])
    anchor = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([2, 3])
    assign_result = self.assign(
        pred_bbox, anchor, gt_bboxes, gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 4
    assert len(assign_result.labels) == 4

    expected_gt_inds = torch.LongTensor([-1, 0, 2, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)


def test_uniform_assigner_with_empty_gt():
    """Test corner case where an image might have no true detections."""
    self = UniformAssigner(0.15, 0.7, 1)
    pred_bbox = torch.FloatTensor([
        [1, 1, 12, 8],
        [4, 4, 20, 20],
        [1, 5, 15, 15],
        [30, 5, 32, 42],
    ])
    anchor = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.empty(0, 4)
    assign_result = self.assign(pred_bbox, anchor, gt_bboxes)

    expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)


def test_uniform_assigner_with_empty_boxes():
    """Test corner case where a network might predict no boxes."""
    self = UniformAssigner(0.15, 0.7, 1)
    pred_bbox = torch.empty((0, 4))
    anchor = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([2, 3])

    # Test with gt_labels
    assign_result = self.assign(
        pred_bbox, anchor, gt_bboxes, gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 0
    assert tuple(assign_result.labels.shape) == (0, )

    # Test without gt_labels
    assign_result = self.assign(pred_bbox, anchor, gt_bboxes, gt_labels=None)
    assert len(assign_result.gt_inds) == 0


def test_sim_ota_assigner():
    self = SimOTAAssigner(
        center_radius=2.5, candidate_topk=1, iou_weight=3.0, cls_weight=1.0)
    pred_scores = torch.FloatTensor([[0.2], [0.8]])
    priors = torch.Tensor([[0, 12, 23, 34], [4, 5, 6, 7]])
    decoded_bboxes = torch.Tensor([[[30, 40, 50, 60]], [[4, 5, 6, 7]]])
    gt_bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
    gt_labels = torch.LongTensor([2])
    assign_result = self.assign(pred_scores, priors, decoded_bboxes,
                                gt_bboxes, gt_labels)

    expected_gt_inds = torch.LongTensor([0, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)


def test_task_aligned_assigner():
    with pytest.raises(AssertionError):
        TaskAlignedAssigner(topk=0)

    self = TaskAlignedAssigner(topk=13)
    pred_score = torch.FloatTensor([[0.1, 0.2], [0.2, 0.3], [0.3, 0.4],
                                    [0.4, 0.5]])
    pred_bbox = torch.FloatTensor([
        [1, 1, 12, 8],
        [4, 4, 20, 20],
        [1, 5, 15, 15],
        [30, 5, 32, 42],
    ])
    anchor = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([0, 1])
    assign_result = self.assign(
        pred_score,
        pred_bbox,
        anchor,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 4
    assert len(assign_result.labels) == 4

    # test empty gt
    gt_bboxes = torch.empty(0, 4)
    gt_labels = torch.empty(0, 2)
    assign_result = self.assign(
        pred_score, pred_bbox, anchor, gt_bboxes=gt_bboxes)
    expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)


def test_mask_hungarian_match_assigner():
    # test no gt masks
    assigner_cfg = dict(
        cls_cost=dict(type='ClassificationCost', weight=1.0),
        mask_cost=dict(type='FocalLossCost', weight=20.0, binary_input=True),
        dice_cost=dict(type='DiceCost', weight=1.0, pred_act=True, eps=1.0))
    self = MaskHungarianAssigner(**assigner_cfg)
    cls_pred = torch.rand((10, 133))
    mask_pred = torch.rand((10, 50, 50))

    gt_labels = torch.empty((0, )).long()
    gt_masks = torch.empty((0, 50, 50)).float()
    img_meta = None
    assign_result = self.assign(cls_pred, mask_pred, gt_labels, gt_masks,
                                img_meta)
    assert torch.all(assign_result.gt_inds == 0)
    assert torch.all(assign_result.labels == -1)

    # test with gt masks when naive_dice is True
    gt_labels = torch.LongTensor([10, 100])
    gt_masks = torch.zeros((2, 50, 50)).long()
    gt_masks[0, :25] = 1
    gt_masks[0, 25:] = 1
    assign_result = self.assign(cls_pred, mask_pred, gt_labels, gt_masks,
                                img_meta)
    assert torch.all(assign_result.gt_inds > -1)
    assert (assign_result.gt_inds > 0).sum() == gt_labels.size(0)
    assert (assign_result.labels > -1).sum() == gt_labels.size(0)

    # test with cls mode
    assigner_cfg = dict(
        cls_cost=dict(type='ClassificationCost', weight=1.0),
        mask_cost=dict(type='FocalLossCost', weight=0.0, binary_input=True),
        dice_cost=dict(type='DiceCost', weight=0.0, pred_act=True, eps=1.0))
    self = MaskHungarianAssigner(**assigner_cfg)
    assign_result = self.assign(cls_pred, mask_pred, gt_labels, gt_masks,
                                img_meta)
    assert torch.all(assign_result.gt_inds > -1)
    assert (assign_result.gt_inds > 0).sum() == gt_labels.size(0)
    assert (assign_result.labels > -1).sum() == gt_labels.size(0)

    # test with mask focal mode
    assigner_cfg = dict(
        cls_cost=dict(type='ClassificationCost', weight=0.0),
        mask_cost=dict(type='FocalLossCost', weight=1.0, binary_input=True),
        dice_cost=dict(type='DiceCost', weight=0.0, pred_act=True, eps=1.0))
    self = MaskHungarianAssigner(**assigner_cfg)
    assign_result = self.assign(cls_pred, mask_pred, gt_labels, gt_masks,
                                img_meta)
    assert torch.all(assign_result.gt_inds > -1)
    assert (assign_result.gt_inds > 0).sum() == gt_labels.size(0)
    assert (assign_result.labels > -1).sum() == gt_labels.size(0)

    # test with mask dice mode
    assigner_cfg = dict(
        cls_cost=dict(type='ClassificationCost', weight=0.0),
        mask_cost=dict(type='FocalLossCost', weight=0.0, binary_input=True),
        dice_cost=dict(type='DiceCost', weight=1.0, pred_act=True, eps=1.0))
    self = MaskHungarianAssigner(**assigner_cfg)
    assign_result = self.assign(cls_pred, mask_pred, gt_labels, gt_masks,
                                img_meta)
    assert torch.all(assign_result.gt_inds > -1)
    assert (assign_result.gt_inds > 0).sum() == gt_labels.size(0)
    assert (assign_result.labels > -1).sum() == gt_labels.size(0)

    # test with mask dice mode when naive_dice is False
    assigner_cfg = dict(
        cls_cost=dict(type='ClassificationCost', weight=0.0),
        mask_cost=dict(type='FocalLossCost', weight=0.0, binary_input=True),
        dice_cost=dict(
            type='DiceCost',
            weight=1.0,
            pred_act=True,
            eps=1.0,
            naive_dice=False))
    self = MaskHungarianAssigner(**assigner_cfg)
    assign_result = self.assign(cls_pred, mask_pred, gt_labels, gt_masks,
                                img_meta)
    assert torch.all(assign_result.gt_inds > -1)
    assert (assign_result.gt_inds > 0).sum() == gt_labels.size(0)
    assert (assign_result.labels > -1).sum() == gt_labels.size(0)

    # test with mask bce mode
    assigner_cfg = dict(
        cls_cost=dict(type='ClassificationCost', weight=0.0),
        mask_cost=dict(
            type='CrossEntropyLossCost', weight=1.0, use_sigmoid=True),
        dice_cost=dict(type='DiceCost', weight=0.0, pred_act=True, eps=1.0))
    self = MaskHungarianAssigner(**assigner_cfg)
    assign_result = self.assign(cls_pred, mask_pred, gt_labels, gt_masks,
                                img_meta)
    assert torch.all(assign_result.gt_inds > -1)
    assert (assign_result.gt_inds > 0).sum() == gt_labels.size(0)
    assert (assign_result.labels > -1).sum() == gt_labels.size(0)

    # test with ce mode of CrossEntropyLossCost, which is not supported yet
    assigner_cfg = dict(
        cls_cost=dict(type='ClassificationCost', weight=0.0),
        mask_cost=dict(
            type='CrossEntropyLossCost', weight=1.0, use_sigmoid=False),
        dice_cost=dict(type='DiceCost', weight=0.0, pred_act=True, eps=1.0))
    with pytest.raises(AssertionError):
        self = MaskHungarianAssigner(**assigner_cfg)


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_utils/test_coder.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch

from mmdet.core.bbox.coder import (DeltaXYWHBBoxCoder, DistancePointBBoxCoder,
                                   TBLRBBoxCoder, YOLOBBoxCoder)


def test_yolo_bbox_coder():
    coder = YOLOBBoxCoder()
    bboxes = torch.Tensor([[-42., -29., 74., 61.], [-10., -29., 106., 61.],
                           [22., -29., 138., 61.], [54., -29., 170., 61.]])
    pred_bboxes = torch.Tensor([[0.4709, 0.6152, 0.1690, -0.4056],
                                [0.5399, 0.6653, 0.1162, -0.4162],
                                [0.4654, 0.6618, 0.1548, -0.4301],
                                [0.4786, 0.6197, 0.1896, -0.4479]])
    grid_size = 32
    expected_decode_bboxes = torch.Tensor(
        [[-53.6102, -10.3096, 83.7478, 49.6824],
         [-15.8700, -8.3901, 114.4236, 50.9693],
         [11.1822, -8.0924, 146.6034, 50.4476],
         [41.2068, -8.9232, 181.4236, 48.5840]])
    assert expected_decode_bboxes.allclose(
        coder.decode(bboxes, pred_bboxes, grid_size))
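

# --- Illustrative sketch (not part of the upstream suite) ---
# decode() applies predicted deltas to the rois and, when max_shape is given,
# clips the result to the image; with all-zero deltas and the default
# means/stds, the rois should come back unchanged, which the first row of
# the test below also checks.
def _example_delta_decode_identity():
    coder = DeltaXYWHBBoxCoder()
    rois = torch.Tensor([[2., 2., 9., 9.]])
    deltas = torch.zeros(1, 4)  # zero offsets and zero log-scale changes
    out = coder.decode(rois, deltas, max_shape=(32, 32))
    assert torch.allclose(out, rois)
    return out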


def test_delta_bbox_coder():
    coder = DeltaXYWHBBoxCoder()

    rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
                         [5., 5., 5., 5.]])
    deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
                           [0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
    expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000],
                                           [0.1409, 0.1409, 2.8591, 2.8591],
                                           [0.0000, 0.3161, 4.1945, 0.6839],
                                           [5.0000, 5.0000, 5.0000, 5.0000]])

    out = coder.decode(rois, deltas, max_shape=(32, 32))
    assert expected_decode_bboxes.allclose(out, atol=1e-04)
    out = coder.decode(rois, deltas, max_shape=torch.Tensor((32, 32)))
    assert expected_decode_bboxes.allclose(out, atol=1e-04)

    batch_rois = rois.unsqueeze(0).repeat(2, 1, 1)
    batch_deltas = deltas.unsqueeze(0).repeat(2, 1, 1)
    batch_out = coder.decode(batch_rois, batch_deltas, max_shape=(32, 32))[0]
    assert out.allclose(batch_out)
    batch_out = coder.decode(
        batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32)])[0]
    assert out.allclose(batch_out)

    # test a max_shape whose length does not match the batch size
    with pytest.raises(AssertionError):
        coder.decode(
            batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32),
                                                 (32, 32)])

    rois = torch.zeros((0, 4))
    deltas = torch.zeros((0, 4))
    out = coder.decode(rois, deltas, max_shape=(32, 32))
    assert rois.shape == out.shape

    # test add_ctr_clamp
    coder = DeltaXYWHBBoxCoder(add_ctr_clamp=True, ctr_clamp=2)

    rois = torch.Tensor([[0., 0., 6., 6.], [0., 0., 1., 1.], [0., 0., 1., 1.],
                         [5., 5., 5., 5.]])
    deltas = torch.Tensor([[1., 1., 2., 2.], [1., 1., 1., 1.],
                           [0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
    expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 27.1672, 27.1672],
                                           [0.1409, 0.1409, 2.8591, 2.8591],
                                           [0.0000, 0.3161, 4.1945, 0.6839],
                                           [5.0000, 5.0000, 5.0000, 5.0000]])

    out = coder.decode(rois, deltas, max_shape=(32, 32))
    assert expected_decode_bboxes.allclose(out, atol=1e-04)


def test_tblr_bbox_coder():
    coder = TBLRBBoxCoder(normalizer=15.)

    rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
                         [5., 5., 5., 5.]])
    deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
                           [0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
    expected_decode_bboxes = torch.Tensor([[0.5000, 0.5000, 0.5000, 0.5000],
                                           [0.0000, 0.0000, 12.0000, 13.0000],
                                           [0.0000, 0.5000, 0.0000, 0.5000],
                                           [5.0000, 5.0000, 5.0000, 5.0000]])

    out = coder.decode(rois, deltas, max_shape=(13, 12))
    assert expected_decode_bboxes.allclose(out)
    out = coder.decode(rois, deltas, max_shape=torch.Tensor((13, 12)))
    assert expected_decode_bboxes.allclose(out)

    batch_rois = rois.unsqueeze(0).repeat(2, 1, 1)
    batch_deltas = deltas.unsqueeze(0).repeat(2, 1, 1)
    batch_out = coder.decode(batch_rois, batch_deltas, max_shape=(13, 12))[0]
    assert out.allclose(batch_out)
    batch_out = coder.decode(
        batch_rois, batch_deltas, max_shape=[(13, 12), (13, 12)])[0]
    assert out.allclose(batch_out)

    # test a max_shape whose length does not match the batch size
    with pytest.raises(AssertionError):
        coder.decode(batch_rois, batch_deltas, max_shape=[(13, 12)])

    rois = torch.zeros((0, 4))
    deltas = torch.zeros((0, 4))
    out = coder.decode(rois, deltas, max_shape=(32, 32))
    assert rois.shape == out.shape


def test_distance_point_bbox_coder():
    coder = DistancePointBBoxCoder()

    points = torch.Tensor([[74., 61.], [-29., 106.], [138., 61.],
                           [29., 170.]])
    gt_bboxes = torch.Tensor([[74., 61., 75., 62.], [0., 104., 0., 112.],
                              [100., 90., 100., 120.],
                              [0., 120., 100., 120.]])
    expected_distance = torch.Tensor([[0., 0., 1., 1.], [0., 2., 29., 6.],
                                      [38., 0., 0., 50.],
                                      [29., 50., 50., 0.]])
    out_distance = coder.encode(points, gt_bboxes, max_dis=50, eps=0)
    assert expected_distance.allclose(out_distance)

    distance = torch.Tensor([[0., 0, 1., 1.], [1., 2., 10., 6.],
                             [22., -29., 138., 61.], [54., -29., 170., 61.]])
    out_bbox = coder.decode(points, distance, max_shape=(120, 100))
    assert gt_bboxes.allclose(out_bbox)


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_utils/test_compat_config.py
================================================
import pytest
from mmcv import ConfigDict

from mmdet.utils.compat_config import (compat_imgs_per_gpu,
                                       compat_loader_args, compat_runner_args)


def test_compat_runner_args():
    cfg = ConfigDict(dict(total_epochs=12))
    with pytest.warns(None) as record:
        cfg = compat_runner_args(cfg)
    assert len(record) == 1
    assert 'runner' in record.list[0].message.args[0]
    assert 'runner' in cfg
    assert cfg.runner.type == 'EpochBasedRunner'
    assert cfg.runner.max_epochs == cfg.total_epochs


def test_compat_loader_args():
    cfg = ConfigDict(dict(data=dict(val=dict(), test=dict(), train=dict())))
    cfg = compat_loader_args(cfg)
    # auto fill loader args
    assert 'val_dataloader' in cfg.data
    assert 'train_dataloader' in cfg.data
    assert 'test_dataloader' in cfg.data

    cfg = ConfigDict(
        dict(
            data=dict(
                samples_per_gpu=1,
                persistent_workers=True,
                workers_per_gpu=1,
                val=dict(samples_per_gpu=3),
                test=dict(samples_per_gpu=2),
                train=dict())))
    cfg = compat_loader_args(cfg)

    assert cfg.data.train_dataloader.workers_per_gpu == 1
    assert cfg.data.train_dataloader.samples_per_gpu == 1
    assert cfg.data.train_dataloader.persistent_workers
    assert cfg.data.val_dataloader.workers_per_gpu == 1
    assert cfg.data.val_dataloader.samples_per_gpu == 3
    assert cfg.data.test_dataloader.workers_per_gpu == 1
    assert cfg.data.test_dataloader.samples_per_gpu == 2

    # test the case where `test` is a list
    cfg = ConfigDict(
        dict(
            data=dict(
                samples_per_gpu=1,
                persistent_workers=True,
                workers_per_gpu=1,
                val=dict(samples_per_gpu=3),
                test=[dict(samples_per_gpu=2),
                      dict(samples_per_gpu=3)],
                train=dict())))
    cfg = compat_loader_args(cfg)
    assert cfg.data.test_dataloader.samples_per_gpu == 3

    # assert that the same arg can not be set in both places at once
    cfg = ConfigDict(
        dict(
            data=dict(
                samples_per_gpu=1,
                persistent_workers=True,
                workers_per_gpu=1,
                val=dict(samples_per_gpu=3),
                test=dict(samples_per_gpu=2),
                train=dict(),
                train_dataloader=dict(samples_per_gpu=2))))
    # samples_per_gpu can not be set in `train_dataloader`
    # and data field at the same time
    with pytest.raises(AssertionError):
        compat_loader_args(cfg)

    cfg = ConfigDict(
        dict(
            data=dict(
                samples_per_gpu=1,
                persistent_workers=True,
                workers_per_gpu=1,
                val=dict(samples_per_gpu=3),
                test=dict(samples_per_gpu=2),
                train=dict(),
                val_dataloader=dict(samples_per_gpu=2))))
    # samples_per_gpu can not be set in `val_dataloader`
    # and data field at the same time
    with pytest.raises(AssertionError):
        compat_loader_args(cfg)

    cfg = ConfigDict(
        dict(
            data=dict(
                samples_per_gpu=1,
                persistent_workers=True,
                workers_per_gpu=1,
                val=dict(samples_per_gpu=3),
                test=dict(samples_per_gpu=2),
                test_dataloader=dict(samples_per_gpu=2))))
    # samples_per_gpu can not be set in `test_dataloader`
    # and data field at the same time
    with pytest.raises(AssertionError):
        compat_loader_args(cfg)


def test_compat_imgs_per_gpu():
    cfg = ConfigDict(
        dict(
            data=dict(
                imgs_per_gpu=1,
                samples_per_gpu=2,
                val=dict(),
                test=dict(),
                train=dict())))
    cfg = compat_imgs_per_gpu(cfg)
    assert cfg.data.samples_per_gpu == cfg.data.imgs_per_gpu


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_utils/test_general_data.py
================================================
import copy

import numpy as np
import pytest
import torch

from mmdet.core import GeneralData, InstanceData


def _equal(a, b):
    if isinstance(a, (torch.Tensor, np.ndarray)):
        return (a == b).all()
    else:
        return a == b
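

# --- Illustrative sketch (not part of the upstream suite) ---
# GeneralData keeps two namespaces: meta info, which is immutable once set,
# and data fields, which are mutable. Re-setting a meta key to a different
# value raises KeyError, a behaviour the tests below rely on.
def _example_meta_is_immutable():
    g = GeneralData(meta_info=dict(img_size=[256, 256]))
    g.bboxes = torch.rand(3, 4)  # data field: allowed
    try:
        g.set_meta_info(dict(img_size=[128, 128]))  # changed meta: rejected
    except KeyError:
        return True
    return False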


def test_general_data():

    # test init
    meta_info = dict(
        img_size=[256, 256],
        path='dadfaff',
        scale_factor=np.array([1.5, 1.5]),
        img_shape=torch.rand(4))

    data = dict(
        bboxes=torch.rand(4, 4),
        labels=torch.rand(4),
        masks=np.random.rand(4, 2, 2))

    instance_data = GeneralData(meta_info=meta_info)
    assert 'img_size' in instance_data
    assert instance_data.img_size == [256, 256]
    assert instance_data['img_size'] == [256, 256]
    assert 'path' in instance_data
    assert instance_data.path == 'dadfaff'

    # test nice_repr
    repr_instance_data = instance_data.new(data=data)
    nice_repr = str(repr_instance_data)
    for line in nice_repr.split('\n'):
        if 'masks' in line:
            assert 'shape' in line
            assert '(4, 2, 2)' in line
        if 'bboxes' in line:
            assert 'shape' in line
            assert 'torch.Size([4, 4])' in line
        if 'path' in line:
            assert 'dadfaff' in line
        if 'scale_factor' in line:
            assert '[1.5 1.5]' in line

    instance_data = GeneralData(
        meta_info=meta_info, data=dict(bboxes=torch.rand(5)))
    assert 'bboxes' in instance_data
    assert len(instance_data.bboxes) == 5

    # data should be a dict
    with pytest.raises(AssertionError):
        GeneralData(data=1)

    # test set data
    instance_data = GeneralData()
    instance_data.set_data(data)
    assert 'bboxes' in instance_data
    assert len(instance_data.bboxes) == 4
    assert 'masks' in instance_data
    assert len(instance_data.masks) == 4

    # data should be a dict
    with pytest.raises(AssertionError):
        instance_data.set_data(data=1)

    # test set_meta
    instance_data = GeneralData()
    instance_data.set_meta_info(meta_info)
    assert 'img_size' in instance_data
    assert instance_data.img_size == [256, 256]
    assert instance_data['img_size'] == [256, 256]
    assert 'path' in instance_data
    assert instance_data.path == 'dadfaff'

    # overwriting with the same values is allowed
    instance_data.set_meta_info(meta_info)

    # meta should be a dict
    with pytest.raises(AssertionError):
        instance_data.set_meta_info(meta_info='fjhka')

    # attributes in `_meta_info_field` are immutable once initialized
    instance_data.set_meta_info(meta_info)
    # meta should be immutable
    with pytest.raises(KeyError):
        instance_data.set_meta_info(dict(img_size=[254, 251]))
    with pytest.raises(KeyError):
        duplicate_meta_info = copy.deepcopy(meta_info)
        duplicate_meta_info['path'] = 'dada'
        instance_data.set_meta_info(duplicate_meta_info)
    with pytest.raises(KeyError):
        duplicate_meta_info = copy.deepcopy(meta_info)
        duplicate_meta_info['scale_factor'] = np.array([1.5, 1.6])
        instance_data.set_meta_info(duplicate_meta_info)

    # test new_instance_data
    instance_data = GeneralData(meta_info)
    new_instance_data = instance_data.new()
    for k, v in instance_data.meta_info_items():
        assert k in new_instance_data
        _equal(v, new_instance_data[k])

    instance_data = GeneralData(meta_info, data=data)
    temp_meta = copy.deepcopy(meta_info)
    temp_data = copy.deepcopy(data)
    temp_data['time'] = '12212'
    temp_meta['img_norm'] = np.random.random(3)
    new_instance_data = instance_data.new(meta_info=temp_meta, data=temp_data)
    for k, v in new_instance_data.meta_info_items():
        if k in instance_data:
            _equal(v, instance_data[k])
        else:
            assert _equal(v, temp_meta[k])
            assert k == 'img_norm'
    for k, v in new_instance_data.items():
        if k in instance_data:
            _equal(v, instance_data[k])
        else:
            assert k == 'time'
            assert _equal(v, temp_data[k])

    # test keys
    instance_data = GeneralData(meta_info, data=dict(bboxes=10))
    assert 'bboxes' in instance_data.keys()
    instance_data.b = 10
    assert 'b' in instance_data

    # test meta keys
    instance_data = GeneralData(meta_info, data=dict(bboxes=10))
    assert 'path' in instance_data.meta_info_keys()
    assert len(instance_data.meta_info_keys()) == len(meta_info)
    instance_data.set_meta_info(dict(workdir='fafaf'))
    assert 'workdir' in instance_data
    assert len(instance_data.meta_info_keys()) == len(meta_info) + 1

    # test values
    instance_data = GeneralData(meta_info, data=dict(bboxes=10))
    assert 10 in instance_data.values()
    assert len(instance_data.values()) == 1

    # test meta values
    instance_data = GeneralData(meta_info, data=dict(bboxes=10))
    # torch 1.3 eq() can not compare str and tensor
    from mmdet import digit_version
    if digit_version(torch.__version__) >= [1, 4]:
        assert 'dadfaff' in instance_data.meta_info_values()
    assert len(instance_data.meta_info_values()) == len(meta_info)

    # test items
    instance_data = GeneralData(data=data)
    for k, v in instance_data.items():
        assert k in data
        assert _equal(v, data[k])

    # test meta_info_items
    instance_data = GeneralData(meta_info=meta_info)
    for k, v in instance_data.meta_info_items():
        assert k in meta_info
        assert _equal(v, meta_info[k])

    # test __setattr__
    new_instance_data = GeneralData(data=data)
    new_instance_data.mask = torch.rand(3, 4, 5)
    new_instance_data.bboxes = torch.rand(2, 4)
    assert 'mask' in new_instance_data
    assert len(new_instance_data.mask) == 3
    assert len(new_instance_data.bboxes) == 2

    # test instance_data_field has been updated
    assert 'mask' in new_instance_data._data_fields
    assert 'bboxes' in new_instance_data._data_fields

    for k in data:
        assert k in new_instance_data._data_fields

    # '_meta_info_field', '_data_fields' is immutable.
    with pytest.raises(AttributeError):
        new_instance_data._data_fields = None
    with pytest.raises(AttributeError):
        new_instance_data._meta_info_fields = None
    with pytest.raises(AttributeError):
        del new_instance_data._data_fields
    with pytest.raises(AttributeError):
        del new_instance_data._meta_info_fields

    # key in _meta_info_field is immutable
    new_instance_data.set_meta_info(meta_info)
    with pytest.raises(KeyError):
        del new_instance_data.img_size
    with pytest.raises(KeyError):
        del new_instance_data.scale_factor
    for k in new_instance_data.meta_info_keys():
        with pytest.raises(AttributeError):
            new_instance_data[k] = None

    # test __delattr__
    # test key can be removed in instance_data_field
    assert 'mask' in new_instance_data._data_fields
    assert 'mask' in new_instance_data.keys()
    assert 'mask' in new_instance_data
    assert hasattr(new_instance_data, 'mask')
    del new_instance_data.mask
    assert 'mask' not in new_instance_data.keys()
    assert 'mask' not in new_instance_data
    assert 'mask' not in new_instance_data._data_fields
    assert not hasattr(new_instance_data, 'mask')

    # test __delitem__
    new_instance_data.mask = torch.rand(1, 2, 3)
    assert 'mask' in new_instance_data._data_fields
    assert 'mask' in new_instance_data
    assert hasattr(new_instance_data, 'mask')
    del new_instance_data['mask']
    assert 'mask' not in new_instance_data
    assert 'mask' not in new_instance_data._data_fields
    assert 'mask' not in new_instance_data
    assert not hasattr(new_instance_data, 'mask')

    # test __setitem__
    new_instance_data['mask'] = torch.rand(1, 2, 3)
    assert 'mask' in new_instance_data._data_fields
    assert 'mask' in new_instance_data.keys()
    assert hasattr(new_instance_data, 'mask')

    # test data_fields has been updated
    assert 'mask' in new_instance_data.keys()
    assert 'mask' in new_instance_data._data_fields

    # '_meta_info_field', '_data_fields' is immutable.
    with pytest.raises(AttributeError):
        del new_instance_data['_data_fields']
    with pytest.raises(AttributeError):
        del new_instance_data['_meta_info_field']

    # test __getitem__
    new_instance_data.mask is new_instance_data['mask']

    # test get
    assert new_instance_data.get('mask') is new_instance_data.mask
    assert new_instance_data.get('none_attribute', None) is None
    assert new_instance_data.get('none_attribute', 1) == 1

    # test pop
    mask = new_instance_data.mask
    assert new_instance_data.pop('mask') is mask
    assert new_instance_data.pop('mask', None) is None
    assert new_instance_data.pop('mask', 1) == 1

    # '_meta_info_field', '_data_fields' is immutable.
    with pytest.raises(KeyError):
        new_instance_data.pop('_data_fields')
    with pytest.raises(KeyError):
        new_instance_data.pop('_meta_info_field')
    # attributes in `_meta_info_field` are immutable
    with pytest.raises(KeyError):
        new_instance_data.pop('img_size')
    # test pop attribute in instance_data_field
    new_instance_data['mask'] = torch.rand(1, 2, 3)
    new_instance_data.pop('mask')
    # test data_field has been updated
    assert 'mask' not in new_instance_data
    assert 'mask' not in new_instance_data._data_fields
    assert 'mask' not in new_instance_data

    # test keys
    new_instance_data.mask = torch.ones(1, 2, 3)
    'mask' in new_instance_data.keys()
    has_flag = False
    for key in new_instance_data.keys():
        if key == 'mask':
            has_flag = True
    assert has_flag

    # test values
    assert len(list(new_instance_data.keys())) == len(
        list(new_instance_data.values()))
    mask = new_instance_data.mask
    has_flag = False
    for value in new_instance_data.values():
        if value is mask:
            has_flag = True
    assert has_flag

    # test items
    assert len(list(new_instance_data.keys())) == len(
        list(new_instance_data.items()))
    mask = new_instance_data.mask
    has_flag = False
    for key, value in new_instance_data.items():
        if value is mask:
            assert key == 'mask'
            has_flag = True
    assert has_flag

    # test device
    new_instance_data = GeneralData()
    if torch.cuda.is_available():
        newnew_instance_data = new_instance_data.new()
        devices = ('cpu', 'cuda')
        for i in range(10):
            device = devices[i % 2]
            newnew_instance_data[f'{i}'] = torch.rand(1, 2, 3, device=device)
        newnew_instance_data = newnew_instance_data.cpu()
        for value in newnew_instance_data.values():
            assert not value.is_cuda
        newnew_instance_data = new_instance_data.new()
        devices = ('cuda', 'cpu')
        for i in range(10):
            device = devices[i % 2]
            newnew_instance_data[f'{i}'] = torch.rand(1, 2, 3, device=device)
        newnew_instance_data = newnew_instance_data.cuda()
        for value in newnew_instance_data.values():
            assert value.is_cuda

    # test to
    double_instance_data = instance_data.new()
    double_instance_data.long = torch.LongTensor(1, 2, 3, 4)
    double_instance_data.bool = torch.BoolTensor(1, 2, 3, 4)
    double_instance_data = instance_data.to(torch.double)
    for k, v in double_instance_data.items():
        if isinstance(v, torch.Tensor):
            assert v.dtype is torch.double

    # test .cpu() .cuda()
    if torch.cuda.is_available():
        cpu_instance_data = double_instance_data.new()
        cpu_instance_data.mask = torch.rand(1)
        cuda_tensor = torch.rand(1, 2, 3).cuda()
        cuda_instance_data = cpu_instance_data.to(cuda_tensor.device)
        for value in cuda_instance_data.values():
            assert value.is_cuda
        cpu_instance_data = cuda_instance_data.cpu()
        for value in cpu_instance_data.values():
            assert not value.is_cuda
        cuda_instance_data = cpu_instance_data.cuda()
        for value in cuda_instance_data.values():
            assert value.is_cuda

    # test detach
    grad_instance_data = double_instance_data.new()
    grad_instance_data.mask = torch.rand(2, requires_grad=True)
    grad_instance_data.mask_1 = torch.rand(2, requires_grad=True)
    detach_instance_data = grad_instance_data.detach()
    for value in detach_instance_data.values():
        assert not value.requires_grad

    # test numpy
    tensor_instance_data = double_instance_data.new()
    tensor_instance_data.mask = torch.rand(2, requires_grad=True)
    tensor_instance_data.mask_1 = torch.rand(2, requires_grad=True)
    numpy_instance_data = tensor_instance_data.numpy()
    for value in numpy_instance_data.values():
        assert isinstance(value, np.ndarray)
    if torch.cuda.is_available():
        tensor_instance_data = double_instance_data.new()
        tensor_instance_data.mask = torch.rand(2)
        tensor_instance_data.mask_1 = torch.rand(2)
        tensor_instance_data = tensor_instance_data.cuda()
        numpy_instance_data = tensor_instance_data.numpy()
        for value in numpy_instance_data.values():
            assert isinstance(value, np.ndarray)

    instance_data['_c'] = 10000
    instance_data.get('dad', None) is None
    assert hasattr(instance_data, '_c')
    del instance_data['_c']
    assert not hasattr(instance_data, '_c')
    instance_data.a = 1000
    instance_data['a'] = 2000
    assert instance_data['a'] == 2000
    assert instance_data.a == 2000
    assert instance_data.get('a') == instance_data['a'] == instance_data.a
    instance_data._meta = 1000
    assert '_meta' in instance_data.keys()
    if torch.cuda.is_available():
        instance_data.bbox = torch.ones(2, 3, 4, 5).cuda()
        instance_data.score = torch.ones(2, 3, 4, 4)
    else:
        instance_data.bbox = torch.ones(2, 3, 4, 5)

    assert len(instance_data.new().keys()) == 0
    with pytest.raises(AttributeError):
        instance_data.img_size = 100

    for k, v in instance_data.items():
        if k == 'bbox':
            assert isinstance(v, torch.Tensor)
    assert 'a' in instance_data
    instance_data.pop('a')
    assert 'a' not in instance_data

    cpu_instance_data = instance_data.cpu()
    for k, v in cpu_instance_data.items():
        if isinstance(v, torch.Tensor):
            assert not v.is_cuda

    assert isinstance(cpu_instance_data.numpy().bbox, np.ndarray)

    if torch.cuda.is_available():
        cuda_results = instance_data.cuda()
        for k, v in cuda_results.items():
            if isinstance(v, torch.Tensor):
                assert v.is_cuda


def test_instance_data():
    meta_info = dict(
        img_size=(256, 256),
        path='dadfaff',
        scale_factor=np.array([1.5, 1.5, 1, 1]))
    data = dict(
        bboxes=torch.rand(4, 4),
        masks=torch.rand(4, 2, 2),
        labels=np.random.rand(4),
        size=[(i, i) for i in range(4)])

    # test init
    instance_data = InstanceData(meta_info)
    assert 'path' in instance_data
    instance_data = InstanceData(meta_info, data=data)
    assert len(instance_data) == 4
    instance_data.set_data(data)
    assert len(instance_data) == 4

    meta_info = copy.deepcopy(meta_info)
    meta_info['img_name'] = 'flag'

    # test new_instance_data
    new_instance_data = instance_data.new(meta_info=meta_info)
    for k, v in new_instance_data.meta_info_items():
        if k in instance_data:
            _equal(v, instance_data[k])
        else:
            assert _equal(v, meta_info[k])
            assert k == 'img_name'
    # meta info is immutable
    with pytest.raises(KeyError):
        meta_info = copy.deepcopy(meta_info)
        meta_info['path'] = 'fdasfdsd'
        instance_data.new(meta_info=meta_info)

    # data fields should have same length
    with pytest.raises(AssertionError):
        temp_data = copy.deepcopy(data)
        temp_data['bboxes'] = torch.rand(5, 4)
        instance_data.new(data=temp_data)

    temp_data = copy.deepcopy(data)
    temp_data['scores'] = torch.rand(4)
    new_instance_data = instance_data.new(data=temp_data)
    for k, v in new_instance_data.items():
        if k in instance_data:
            _equal(v, instance_data[k])
        else:
            assert k == 'scores'
            assert _equal(v, temp_data[k])

    instance_data = instance_data.new()

    # test __setattr__
    # '_meta_info_field', '_data_fields' is immutable.
    with pytest.raises(AttributeError):
        instance_data._data_fields = dict()
    with pytest.raises(AttributeError):
        instance_data._data_fields = dict()

    # all attributes in instance_data_field should be
    # (torch.Tensor, np.ndarray, list)
    with pytest.raises(AssertionError):
        instance_data.a = 1000

    # instance_data fields should have the same length
    new_instance_data = instance_data.new()
    new_instance_data.det_bbox = torch.rand(100, 4)
    new_instance_data.det_label = torch.arange(100)
    with pytest.raises(AssertionError):
        new_instance_data.scores = torch.rand(101, 1)
    new_instance_data.none = [None] * 100
    with pytest.raises(AssertionError):
        new_instance_data.scores = [None] * 101
    new_instance_data.numpy_det = np.random.random([100, 1])
    with pytest.raises(AssertionError):
        new_instance_data.scores = np.random.random([101, 1])

    # isinstance(str, slice, int, torch.LongTensor, torch.BoolTensor)
    item = torch.Tensor([1, 2, 3, 4])
    with pytest.raises(AssertionError):
        new_instance_data[item]
    len(new_instance_data[item.long()]) == 1

    # when input is a bool tensor, the shape of
    # the input at index 0 should equal to
    # the value length in instance_data_field
    with pytest.raises(AssertionError):
        new_instance_data[item.bool()]

    for i in range(len(new_instance_data)):
        assert new_instance_data[i].det_label == i
        assert len(new_instance_data[i]) == 1

    # the index should be in 0 ~ len(instance_data) - 1
    with pytest.raises(IndexError):
        new_instance_data[101]

    # the index should not be an empty tensor
    new_new_instance_data = new_instance_data.new()
    with pytest.raises(AssertionError):
        new_new_instance_data[0]

    # test str
    with pytest.raises(AssertionError):
        instance_data.img_size_dummy = meta_info['img_size']

    # test slice
    ten_results = new_instance_data[:10]
    len(ten_results) == 10
    for v in ten_results.values():
        assert len(v) == 10

    # test LongTensor
    long_tensor = torch.randint(100, (50, ))
    long_index_instance_data = new_instance_data[long_tensor]
    assert len(long_index_instance_data) == len(long_tensor)
    for key, value in long_index_instance_data.items():
        if not isinstance(value, list):
            assert (long_index_instance_data[key] == new_instance_data[key]
                    [long_tensor]).all()
        else:
            len(long_tensor) == len(value)

    # test bool tensor
    bool_tensor = torch.rand(100) > 0.5
    bool_index_instance_data = new_instance_data[bool_tensor]
    assert len(bool_index_instance_data) == bool_tensor.sum()
    for key, value in bool_index_instance_data.items():
        if not isinstance(value, list):
            assert (bool_index_instance_data[key] == new_instance_data[key]
                    [bool_tensor]).all()
        else:
            assert len(value) == bool_tensor.sum()

    num_instance = 1000
    instance_data_list = []

    # assert len(instance_lists) > 0
    with pytest.raises(AssertionError):
        instance_data.cat(instance_data_list)

    for _ in range(2):
        instance_data['bbox'] = torch.rand(num_instance, 4)
        instance_data['label'] = torch.rand(num_instance, 1)
        instance_data['mask'] = torch.rand(num_instance, 224, 224)
        instance_data['instances_infos'] = [1] * num_instance
        instance_data['cpu_bbox'] = np.random.random((num_instance, 4))
        if torch.cuda.is_available():
            instance_data.cuda_tensor = torch.rand(num_instance).cuda()
            assert instance_data.cuda_tensor.is_cuda
            cuda_instance_data = instance_data.cuda()
            assert cuda_instance_data.cuda_tensor.is_cuda

        assert len(instance_data[0]) == 1
        with pytest.raises(IndexError):
            return instance_data[num_instance + 1]
        with pytest.raises(AssertionError):
            instance_data.centerness = torch.rand(num_instance + 1, 1)

        mask_tensor = torch.rand(num_instance) > 0.5
        length = mask_tensor.sum()
        assert len(instance_data[mask_tensor]) == length

        index_tensor = torch.LongTensor([1, 5, 8, 110, 399])
        length = len(index_tensor)

        assert len(instance_data[index_tensor]) == length
        instance_data_list.append(instance_data)

    cat_results = InstanceData.cat(instance_data_list)
    assert len(cat_results) == num_instance * 2

    instances = InstanceData(data=dict(bboxes=torch.rand(4, 4)))
    # cat a single instance list
    assert len(InstanceData.cat([instances])) == 4


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_utils/test_hook.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import shutil
import sys
import tempfile
from unittest.mock import MagicMock, Mock, call, patch

import numpy as np
import pytest
import torch
import torch.nn as nn
from mmcv.runner import (CheckpointHook, IterTimerHook, PaviLoggerHook,
                         build_runner)
from torch.nn.init import constant_
from torch.utils.data import DataLoader, Dataset

from mmdet.core.hook import ExpMomentumEMAHook, YOLOXLrUpdaterHook
from mmdet.core.hook.sync_norm_hook import SyncNormHook
from mmdet.core.hook.sync_random_size_hook import SyncRandomSizeHook


def _build_demo_runner_without_hook(runner_type='EpochBasedRunner',
                                    max_epochs=1,
                                    max_iters=None,
                                    multi_optimizers=False):

    class Model(nn.Module):

        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(2, 1)
            self.conv = nn.Conv2d(3, 3, 3)

        def forward(self, x):
            return self.linear(x)

        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

    model = Model()

    if multi_optimizers:
        optimizer = {
            'model1':
            torch.optim.SGD(model.linear.parameters(), lr=0.02,
                            momentum=0.95),
            'model2':
            torch.optim.SGD(model.conv.parameters(), lr=0.01, momentum=0.9),
        }
    else:
        optimizer = torch.optim.SGD(model.parameters(), lr=0.02,
                                    momentum=0.95)

    tmp_dir = tempfile.mkdtemp()
    runner = build_runner(
        dict(type=runner_type),
        default_args=dict(
            model=model,
            work_dir=tmp_dir,
            optimizer=optimizer,
            logger=logging.getLogger(),
            max_epochs=max_epochs,
            max_iters=max_iters))
    return runner


def _build_demo_runner(runner_type='EpochBasedRunner',
                       max_epochs=1,
                       max_iters=None,
                       multi_optimizers=False):
    log_config = dict(
        interval=1, hooks=[
            dict(type='TextLoggerHook'),
        ])

    runner = _build_demo_runner_without_hook(runner_type, max_epochs,
                                             max_iters, multi_optimizers)

    runner.register_checkpoint_hook(dict(interval=1))
    runner.register_logger_hooks(log_config)
    return runner
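

# --- Illustrative note (not part of the upstream suite) ---
# The helpers above build a throwaway runner in a temp dir; the tests below
# then attach hooks either from a config dict or as instances, e.g.:
#   runner = _build_demo_runner()
#   runner.register_hook_from_cfg(dict(type='IterTimerHook'))
#   runner.register_hook(IterTimerHook())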


@pytest.mark.parametrize('multi_optimizers', (True, False))
def test_yolox_lrupdater_hook(multi_optimizers):
    """xdoctest -m tests/test_hooks.py test_cosine_runner_hook."""
    # Only used to prevent program errors
    YOLOXLrUpdaterHook(0, min_lr_ratio=0.05)

    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)

    hook_cfg = dict(
        type='YOLOXLrUpdaterHook',
        warmup='exp',
        by_epoch=False,
        warmup_by_epoch=True,
        warmup_ratio=1,
        warmup_iters=5,  # 5 epoch
        num_last_epochs=15,
        min_lr_ratio=0.05)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    runner.register_hook(IterTimerHook())
    # add pavi hook
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)

    # TODO: use a more elegant way to check values
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [
            call(
                'train', {
                    'learning_rate/model1': 8.000000000000001e-06,
                    'learning_rate/model2': 4.000000000000001e-06,
                    'momentum/model1': 0.95,
                    'momentum/model2': 0.9
                }, 1),
            call(
                'train', {
                    'learning_rate/model1': 0.00039200000000000004,
                    'learning_rate/model2': 0.00019600000000000002,
                    'momentum/model1': 0.95,
                    'momentum/model2': 0.9
                }, 7),
            call(
                'train', {
                    'learning_rate/model1': 0.0008000000000000001,
                    'learning_rate/model2': 0.0004000000000000001,
                    'momentum/model1': 0.95,
                    'momentum/model2': 0.9
                }, 10)
        ]
    else:
        calls = [
            call('train', {
                'learning_rate': 8.000000000000001e-06,
                'momentum': 0.95
            }, 1),
            call('train', {
                'learning_rate': 0.00039200000000000004,
                'momentum': 0.95
            }, 7),
            call('train', {
                'learning_rate': 0.0008000000000000001,
                'momentum': 0.95
            }, 10)
        ]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)


def test_ema_hook():
    """xdoctest -m tests/test_hooks.py test_ema_hook."""

    class DemoModel(nn.Module):

        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(
                in_channels=1,
                out_channels=2,
                kernel_size=1,
                padding=1,
                bias=True)
            self.bn = nn.BatchNorm2d(2)
            self._init_weight()

        def _init_weight(self):
            constant_(self.conv.weight, 0)
            constant_(self.conv.bias, 0)
            constant_(self.bn.weight, 0)
            constant_(self.bn.bias, 0)

        def forward(self, x):
            return self.bn(self.conv(x)).sum()

        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

    loader = DataLoader(torch.ones((1, 1, 1, 1)))
    runner = _build_demo_runner()
    demo_model = DemoModel()
    runner.model = demo_model
    ema_hook = ExpMomentumEMAHook(
        momentum=0.0002,
        total_iter=1,
        skip_buffers=True,
        interval=2,
        resume_from=None)
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    runner.register_hook(ema_hook, priority='HIGHEST')
    runner.register_hook(checkpointhook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    checkpoint = torch.load(f'{runner.work_dir}/epoch_1.pth')
    num_ema_params = 0
    for name, value in checkpoint['state_dict'].items():
        if 'ema' in name:
            num_ema_params += 1
            value.fill_(1)
    assert num_ema_params == 4
    torch.save(checkpoint, f'{runner.work_dir}/epoch_1.pth')
    work_dir = runner.work_dir
    resume_ema_hook = ExpMomentumEMAHook(
        momentum=0.5,
        total_iter=10,
        skip_buffers=True,
        interval=1,
        resume_from=f'{work_dir}/epoch_1.pth')
    runner = _build_demo_runner(max_epochs=2)
    runner.model = demo_model
    runner.register_hook(resume_ema_hook, priority='HIGHEST')
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    runner.register_hook(checkpointhook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    checkpoint = torch.load(f'{runner.work_dir}/epoch_2.pth')
    num_ema_params = 0
    desired_output = [0.9094, 0.9094]
    for name, value in checkpoint['state_dict'].items():
        if 'ema' in name:
            num_ema_params += 1
            assert value.sum() == 2
        else:
            if ('weight' in name) or ('bias' in name):
                np.allclose(value.data.cpu().numpy().reshape(-1),
                            desired_output, 1e-4)
    assert num_ema_params == 4
    shutil.rmtree(runner.work_dir)
    shutil.rmtree(work_dir)


def test_sync_norm_hook():
    # Only used to prevent program errors
    SyncNormHook()

    loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner()
    runner.register_hook_from_cfg(dict(type='SyncNormHook'))
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)


def test_sync_random_size_hook():
    # Only used to prevent program errors
    SyncRandomSizeHook()

    class DemoDataset(Dataset):

        def __getitem__(self, item):
            return torch.ones(2)

        def __len__(self):
            return 5

        def update_dynamic_scale(self, dynamic_scale):
            pass

    loader = DataLoader(DemoDataset())
    runner = _build_demo_runner()
    runner.register_hook_from_cfg(
        dict(type='SyncRandomSizeHook', device='cpu'))
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)

    if torch.cuda.is_available():
        runner = _build_demo_runner()
        runner.register_hook_from_cfg(
            dict(type='SyncRandomSizeHook', device='cuda'))
        runner.run([loader, loader], [('train', 1), ('val', 1)])
        shutil.rmtree(runner.work_dir)


@pytest.mark.parametrize('set_loss', [
    dict(set_loss_nan=False, set_loss_inf=False),
    dict(set_loss_nan=True, set_loss_inf=False),
    dict(set_loss_nan=False, set_loss_inf=True)
])
def test_check_invalid_loss_hook(set_loss):
    # Check whether loss is valid during training.

    class DemoModel(nn.Module):

        def __init__(self, set_loss_nan=False, set_loss_inf=False):
            super().__init__()
            self.set_loss_nan = set_loss_nan
            self.set_loss_inf = set_loss_inf
            self.linear = nn.Linear(2, 1)

        def forward(self, x):
            return self.linear(x)

        def train_step(self, x, optimizer, **kwargs):
            if self.set_loss_nan:
                return dict(loss=torch.tensor(float('nan')))
            elif self.set_loss_inf:
                return dict(loss=torch.tensor(float('inf')))
            else:
                return dict(loss=self(x))

    loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner()
    demo_model = DemoModel(**set_loss)
    runner.model = demo_model
    runner.register_hook_from_cfg(
        dict(type='CheckInvalidLossHook', interval=1))
    if not set_loss['set_loss_nan'] \
            and not set_loss['set_loss_inf']:
        # check loss is valid
        runner.run([loader], [('train', 1)])
    else:
        # check loss is nan or inf
        with pytest.raises(AssertionError):
            runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)


def test_set_epoch_info_hook():
    """Test SetEpochInfoHook."""

    class DemoModel(nn.Module):

        def __init__(self):
            super().__init__()
            self.epoch = 0
            self.linear = nn.Linear(2, 1)

        def forward(self, x):
            return self.linear(x)

        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

        def set_epoch(self, epoch):
            self.epoch = epoch

    loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner(max_epochs=3)
    demo_model = DemoModel()
    runner.model = demo_model
    runner.register_hook_from_cfg(dict(type='SetEpochInfoHook'))
    runner.run([loader], [('train', 1)])
    assert demo_model.epoch == 2


def test_memory_profiler_hook():
    from collections import namedtuple

    # test ImportError without psutil and memory_profiler
    with pytest.raises(ImportError):
        from mmdet.core.hook import MemoryProfilerHook
        MemoryProfilerHook(1)

    # test ImportError without memory_profiler
    sys.modules['psutil'] = MagicMock()
    with pytest.raises(ImportError):
        from mmdet.core.hook import MemoryProfilerHook
        MemoryProfilerHook(1)

    sys.modules['memory_profiler'] = MagicMock()

    def _mock_virtual_memory():
        virtual_memory_type = namedtuple(
            'virtual_memory', ['total', 'available', 'percent', 'used'])
        return virtual_memory_type(
            total=270109085696,
            available=250416816128,
            percent=7.3,
            used=17840881664)

    def _mock_swap_memory():
        swap_memory_type = namedtuple('swap_memory', [
            'total',
            'used',
            'percent',
        ])
        return swap_memory_type(total=8589930496, used=0, percent=0.0)

    def _mock_memory_usage():
        return [40.22265625]

    mock_virtual_memory = Mock(return_value=_mock_virtual_memory())
    mock_swap_memory = Mock(return_value=_mock_swap_memory())
    mock_memory_usage = Mock(return_value=_mock_memory_usage())

    @patch('psutil.swap_memory', mock_swap_memory)
    @patch('psutil.virtual_memory', mock_virtual_memory)
    @patch('memory_profiler.memory_usage', mock_memory_usage)
    def _test_memory_profiler_hook():
        from mmdet.core.hook import MemoryProfilerHook
        hook = MemoryProfilerHook(1)

        runner = _build_demo_runner()
        assert not mock_memory_usage.called
        assert not mock_swap_memory.called
        assert not mock_memory_usage.called
        hook.after_iter(runner)
        assert mock_memory_usage.called
        assert mock_swap_memory.called
        assert mock_memory_usage.called

    _test_memory_profiler_hook()


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_utils/test_layer_decay_optimizer_constructor.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule

from mmdet.core.optimizers import LearningRateDecayOptimizerConstructor

base_lr = 1
decay_rate = 2
base_wd = 0.05
weight_decay = 0.05

expected_stage_wise_lr_wd_convnext = [{
    'weight_decay': 0.0,
    'lr_scale': 128
}, {
    'weight_decay': 0.0,
    'lr_scale': 1
}, {
    'weight_decay': 0.05,
    'lr_scale': 64
}, {
    'weight_decay': 0.0,
    'lr_scale': 64
}, {
    'weight_decay': 0.05,
    'lr_scale': 32
}, {
    'weight_decay': 0.0,
    'lr_scale': 32
}, {
    'weight_decay': 0.05,
    'lr_scale': 16
}, {
    'weight_decay': 0.0,
    'lr_scale': 16
}, {
    'weight_decay': 0.05,
    'lr_scale': 8
}, {
    'weight_decay': 0.0,
    'lr_scale': 8
}, {
    'weight_decay': 0.05,
    'lr_scale': 128
}, {
    'weight_decay': 0.05,
    'lr_scale': 1
}]

expected_layer_wise_lr_wd_convnext = [{
    'weight_decay': 0.0,
    'lr_scale': 128
}, {
    'weight_decay': 0.0,
    'lr_scale': 1
}, {
    'weight_decay': 0.05,
    'lr_scale': 64
}, {
    'weight_decay': 0.0,
    'lr_scale': 64
}, {
    'weight_decay': 0.05,
    'lr_scale': 32
}, {
    'weight_decay': 0.0,
    'lr_scale': 32
}, {
    'weight_decay': 0.05,
    'lr_scale': 16
}, {
    'weight_decay': 0.0,
    'lr_scale': 16
}, {
    'weight_decay': 0.05,
    'lr_scale': 2
}, {
    'weight_decay': 0.0,
    'lr_scale': 2
}, {
    'weight_decay': 0.05,
    'lr_scale': 128
}, {
    'weight_decay': 0.05,
    'lr_scale': 1
}]


class ToyConvNeXt(nn.Module):

    def __init__(self):
        super().__init__()
        self.stages = nn.ModuleList()
        for i in range(4):
            stage = nn.Sequential(ConvModule(3, 4, kernel_size=1, bias=True))
            self.stages.append(stage)
        self.norm0 = nn.BatchNorm2d(2)

        # add some variables to meet unit test coverage rate
        self.cls_token = nn.Parameter(torch.ones(1))
        self.mask_token = nn.Parameter(torch.ones(1))
        self.pos_embed = nn.Parameter(torch.ones(1))
        self.stem_norm = nn.Parameter(torch.ones(1))
        self.downsample_norm0 = nn.BatchNorm2d(2)
        self.downsample_norm1 = nn.BatchNorm2d(2)
        self.downsample_norm2 = nn.BatchNorm2d(2)
        self.lin = nn.Parameter(torch.ones(1))
        self.lin.requires_grad = False
        self.downsample_layers = nn.ModuleList()
        for _ in range(4):
            stage = nn.Sequential(nn.Conv2d(3, 4, kernel_size=1, bias=True))
            self.downsample_layers.append(stage)


class ToyDetector(nn.Module):

    def __init__(self, backbone):
        super().__init__()
        self.backbone = backbone
        self.head = nn.Conv2d(2, 2, kernel_size=1, groups=2)


class PseudoDataParallel(nn.Module):

    def __init__(self, model):
        super().__init__()
        self.module = model


def check_optimizer_lr_wd(optimizer, gt_lr_wd):
    assert isinstance(optimizer, torch.optim.AdamW)
    assert optimizer.defaults['lr'] == base_lr
    assert optimizer.defaults['weight_decay'] == base_wd
    param_groups = optimizer.param_groups
    print(param_groups)
    assert len(param_groups) == len(gt_lr_wd)
    for i, param_dict in enumerate(param_groups):
        assert param_dict['weight_decay'] == gt_lr_wd[i]['weight_decay']
        assert param_dict['lr_scale'] == gt_lr_wd[i]['lr_scale']
        assert param_dict['lr_scale'] == param_dict['lr']


def test_learning_rate_decay_optimizer_constructor():

    # Test lr wd for ConvNeXT
    backbone = ToyConvNeXt()
    model = PseudoDataParallel(ToyDetector(backbone))
    optimizer_cfg = dict(
        type='AdamW', lr=base_lr, betas=(0.9, 0.999), weight_decay=0.05)
    # stagewise decay
    stagewise_paramwise_cfg = dict(
        decay_rate=decay_rate, decay_type='stage_wise', num_layers=6)
    optim_constructor = LearningRateDecayOptimizerConstructor(
        optimizer_cfg, stagewise_paramwise_cfg)
    optimizer = optim_constructor(model)
    check_optimizer_lr_wd(optimizer, expected_stage_wise_lr_wd_convnext)
    # layerwise decay
    layerwise_paramwise_cfg = dict(
        decay_rate=decay_rate, decay_type='layer_wise', num_layers=6)
    optim_constructor = LearningRateDecayOptimizerConstructor(
        optimizer_cfg, layerwise_paramwise_cfg)
    optimizer = optim_constructor(model)
    check_optimizer_lr_wd(optimizer, expected_layer_wise_lr_wd_convnext)


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_utils/test_logger.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import pytest

from mmdet.utils import get_caller_name, log_img_scale


def callee_func():
    caller_name = get_caller_name()
    return caller_name


class CallerClassForTest:

    def __init__(self):
        self.caller_name = callee_func()


def test_get_caller_name():
    # test the case that the caller is a function
    caller_name = callee_func()
    assert caller_name == 'test_get_caller_name'

    # test the case that the caller is a method in a class
    caller_class = CallerClassForTest()
    assert caller_class.caller_name == 'CallerClassForTest.__init__'


def test_log_img_scale():
    img_scale = (800, 1333)
    done_logging = log_img_scale(img_scale)
    assert done_logging

    img_scale = (1333, 800)
    done_logging = log_img_scale(img_scale, shape_order='wh')
    assert done_logging

    with pytest.raises(ValueError):
        img_scale = (1333, 800)
        done_logging = log_img_scale(img_scale, shape_order='xywh')

    img_scale = (640, 640)
    done_logging = log_img_scale(img_scale, skip_square=False)
    assert done_logging

    img_scale = (640, 640)
    done_logging = log_img_scale(img_scale, skip_square=True)
    assert not done_logging


================================================
FILE: DLTA_AI_app/mmdetection/tests/test_utils/test_masks.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np import pytest import torch from mmdet.core import BitmapMasks, PolygonMasks, mask2bbox def dummy_raw_bitmap_masks(size): """ Args: size (tuple): expected shape of dummy masks, (H, W) or (N, H, W) Return: ndarray: dummy mask """ return np.random.randint(0, 2, size, dtype=np.uint8) def dummy_raw_polygon_masks(size): """ Args: size (tuple): expected shape of dummy masks, (N, H, W) Return: list[list[ndarray]]: dummy mask """ num_obj, height, width = size polygons = [] for _ in range(num_obj): num_points = np.random.randint(5) * 2 + 6 polygons.append([np.random.uniform(0, min(height, width), num_points)]) return polygons def dummy_bboxes(num, max_height, max_width): x1y1 = np.random.randint(0, min(max_height // 2, max_width // 2), (num, 2)) wh = np.random.randint(0, min(max_height // 2, max_width // 2), (num, 2)) x2y2 = x1y1 + wh return np.concatenate([x1y1, x2y2], axis=1).squeeze().astype(np.float32) def test_bitmap_mask_init(): # init with empty ndarray masks raw_masks = np.empty((0, 28, 28), dtype=np.uint8) bitmap_masks = BitmapMasks(raw_masks, 28, 28) assert len(bitmap_masks) == 0 assert bitmap_masks.height == 28 assert bitmap_masks.width == 28 # init with empty list masks raw_masks = [] bitmap_masks = BitmapMasks(raw_masks, 28, 28) assert len(bitmap_masks) == 0 assert bitmap_masks.height == 28 assert bitmap_masks.width == 28 # init with ndarray masks contain 3 instances raw_masks = dummy_raw_bitmap_masks((3, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) assert len(bitmap_masks) == 3 assert bitmap_masks.height == 28 assert bitmap_masks.width == 28 # init with list masks contain 3 instances raw_masks = [dummy_raw_bitmap_masks((28, 28)) for _ in range(3)] bitmap_masks = BitmapMasks(raw_masks, 28, 28) assert len(bitmap_masks) == 3 assert bitmap_masks.height == 28 assert bitmap_masks.width == 28 # init with raw masks of unsupported type with pytest.raises(AssertionError): raw_masks = [[dummy_raw_bitmap_masks((28, 28))]] BitmapMasks(raw_masks, 28, 28) def test_bitmap_mask_rescale(): # rescale with empty bitmap masks raw_masks = dummy_raw_bitmap_masks((0, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) rescaled_masks = bitmap_masks.rescale((56, 72)) assert len(rescaled_masks) == 0 assert rescaled_masks.height == 56 assert rescaled_masks.width == 56 # rescale with bitmap masks contain 1 instances raw_masks = np.array([[[1, 0, 0, 0], [0, 1, 0, 1]]]) bitmap_masks = BitmapMasks(raw_masks, 2, 4) rescaled_masks = bitmap_masks.rescale((8, 8)) assert len(rescaled_masks) == 1 assert rescaled_masks.height == 4 assert rescaled_masks.width == 8 truth = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1], [0, 0, 1, 1, 0, 0, 1, 1]]]) assert (rescaled_masks.masks == truth).all() def test_bitmap_mask_resize(): # resize with empty bitmap masks raw_masks = dummy_raw_bitmap_masks((0, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) resized_masks = bitmap_masks.resize((56, 72)) assert len(resized_masks) == 0 assert resized_masks.height == 56 assert resized_masks.width == 72 # resize with bitmap masks contain 1 instances raw_masks = np.diag(np.ones(4, dtype=np.uint8))[np.newaxis, ...] 
bitmap_masks = BitmapMasks(raw_masks, 4, 4) resized_masks = bitmap_masks.resize((8, 8)) assert len(resized_masks) == 1 assert resized_masks.height == 8 assert resized_masks.width == 8 truth = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1]]]) assert (resized_masks.masks == truth).all() # resize to non-square raw_masks = np.diag(np.ones(4, dtype=np.uint8))[np.newaxis, ...] bitmap_masks = BitmapMasks(raw_masks, 4, 4) resized_masks = bitmap_masks.resize((4, 8)) assert len(resized_masks) == 1 assert resized_masks.height == 4 assert resized_masks.width == 8 truth = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1]]]) assert (resized_masks.masks == truth).all() def test_bitmap_mask_get_bboxes(): # resize with empty bitmap masks raw_masks = dummy_raw_bitmap_masks((0, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) bboxes = bitmap_masks.get_bboxes() assert len(bboxes) == 0 # resize with bitmap masks contain 1 instances raw_masks = np.array([[[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]]) bitmap_masks = BitmapMasks(raw_masks, 8, 8) bboxes = bitmap_masks.get_bboxes() assert len(bboxes) == 1 truth = np.array([[1, 1, 6, 6]]) assert (bboxes == truth).all() # resize to non-square raw_masks = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]]) bitmap_masks = BitmapMasks(raw_masks, 4, 8) bboxes = bitmap_masks.get_bboxes() truth = np.array([[0, 0, 6, 3]]) assert (bboxes == truth).all() def test_bitmap_mask_flip(): # flip with empty bitmap masks raw_masks = dummy_raw_bitmap_masks((0, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) flipped_masks = bitmap_masks.flip(flip_direction='horizontal') assert len(flipped_masks) == 0 assert flipped_masks.height == 28 assert flipped_masks.width == 28 # horizontally flip with bitmap masks contain 3 instances raw_masks = dummy_raw_bitmap_masks((3, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) flipped_masks = bitmap_masks.flip(flip_direction='horizontal') flipped_flipped_masks = flipped_masks.flip(flip_direction='horizontal') assert flipped_masks.masks.shape == (3, 28, 28) assert (bitmap_masks.masks == flipped_flipped_masks.masks).all() assert (flipped_masks.masks == raw_masks[:, :, ::-1]).all() # vertically flip with bitmap masks contain 3 instances raw_masks = dummy_raw_bitmap_masks((3, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) flipped_masks = bitmap_masks.flip(flip_direction='vertical') flipped_flipped_masks = flipped_masks.flip(flip_direction='vertical') assert len(flipped_masks) == 3 assert flipped_masks.height == 28 assert flipped_masks.width == 28 assert (bitmap_masks.masks == flipped_flipped_masks.masks).all() assert (flipped_masks.masks == raw_masks[:, ::-1, :]).all() # diagonal flip with bitmap masks contain 3 instances raw_masks = dummy_raw_bitmap_masks((3, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) flipped_masks = bitmap_masks.flip(flip_direction='diagonal') flipped_flipped_masks = flipped_masks.flip(flip_direction='diagonal') assert len(flipped_masks) == 3 assert flipped_masks.height == 28 assert flipped_masks.width == 28 assert (bitmap_masks.masks == 
flipped_flipped_masks.masks).all() assert (flipped_masks.masks == raw_masks[:, ::-1, ::-1]).all() def test_bitmap_mask_pad(): # pad with empty bitmap masks raw_masks = dummy_raw_bitmap_masks((0, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) padded_masks = bitmap_masks.pad((56, 56)) assert len(padded_masks) == 0 assert padded_masks.height == 56 assert padded_masks.width == 56 # pad with bitmap masks contain 3 instances raw_masks = dummy_raw_bitmap_masks((3, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) padded_masks = bitmap_masks.pad((56, 56)) assert len(padded_masks) == 3 assert padded_masks.height == 56 assert padded_masks.width == 56 assert (padded_masks.masks[:, 28:, 28:] == 0).all() def test_bitmap_mask_crop(): # crop with empty bitmap masks dummy_bbox = np.array([0, 10, 10, 27], dtype=np.int) raw_masks = dummy_raw_bitmap_masks((0, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) cropped_masks = bitmap_masks.crop(dummy_bbox) assert len(cropped_masks) == 0 assert cropped_masks.height == 17 assert cropped_masks.width == 10 # crop with bitmap masks contain 3 instances raw_masks = dummy_raw_bitmap_masks((3, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) cropped_masks = bitmap_masks.crop(dummy_bbox) assert len(cropped_masks) == 3 assert cropped_masks.height == 17 assert cropped_masks.width == 10 x1, y1, x2, y2 = dummy_bbox assert (cropped_masks.masks == raw_masks[:, y1:y2, x1:x2]).all() # crop with invalid bbox with pytest.raises(AssertionError): dummy_bbox = dummy_bboxes(2, 28, 28) bitmap_masks.crop(dummy_bbox) def test_bitmap_mask_crop_and_resize(): dummy_bbox = dummy_bboxes(5, 28, 28) inds = np.random.randint(0, 3, (5, )) # crop and resize with empty bitmap masks raw_masks = dummy_raw_bitmap_masks((0, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) cropped_resized_masks = bitmap_masks.crop_and_resize( dummy_bbox, (56, 56), inds) assert len(cropped_resized_masks) == 0 assert cropped_resized_masks.height == 56 assert cropped_resized_masks.width == 56 # crop and resize with bitmap masks contain 3 instances raw_masks = dummy_raw_bitmap_masks((3, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) cropped_resized_masks = bitmap_masks.crop_and_resize( dummy_bbox, (56, 56), inds) assert len(cropped_resized_masks) == 5 assert cropped_resized_masks.height == 56 assert cropped_resized_masks.width == 56 def test_bitmap_mask_expand(): # expand with empty bitmap masks raw_masks = dummy_raw_bitmap_masks((0, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) expanded_masks = bitmap_masks.expand(56, 56, 12, 14) assert len(expanded_masks) == 0 assert expanded_masks.height == 56 assert expanded_masks.width == 56 # expand with bitmap masks contain 3 instances raw_masks = dummy_raw_bitmap_masks((3, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) expanded_masks = bitmap_masks.expand(56, 56, 12, 14) assert len(expanded_masks) == 3 assert expanded_masks.height == 56 assert expanded_masks.width == 56 assert (expanded_masks.masks[:, :12, :14] == 0).all() assert (expanded_masks.masks[:, 12 + 28:, 14 + 28:] == 0).all() def test_bitmap_mask_area(): # area of empty bitmap mask raw_masks = dummy_raw_bitmap_masks((0, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) assert bitmap_masks.areas.sum() == 0 # area of bitmap masks contain 3 instances raw_masks = dummy_raw_bitmap_masks((3, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) areas = bitmap_masks.areas assert len(areas) == 3 assert (areas == raw_masks.sum((1, 2))).all() def 
test_bitmap_mask_to_ndarray(): # empty bitmap masks to ndarray raw_masks = dummy_raw_bitmap_masks((0, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) ndarray_masks = bitmap_masks.to_ndarray() assert isinstance(ndarray_masks, np.ndarray) assert ndarray_masks.shape == (0, 28, 28) # bitmap masks contain 3 instances to ndarray raw_masks = dummy_raw_bitmap_masks((3, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) ndarray_masks = bitmap_masks.to_ndarray() assert isinstance(ndarray_masks, np.ndarray) assert ndarray_masks.shape == (3, 28, 28) assert (ndarray_masks == raw_masks).all() def test_bitmap_mask_to_tensor(): # empty bitmap masks to tensor raw_masks = dummy_raw_bitmap_masks((0, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) tensor_masks = bitmap_masks.to_tensor(dtype=torch.uint8, device='cpu') assert isinstance(tensor_masks, torch.Tensor) assert tensor_masks.shape == (0, 28, 28) # bitmap masks contain 3 instances to tensor raw_masks = dummy_raw_bitmap_masks((3, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) tensor_masks = bitmap_masks.to_tensor(dtype=torch.uint8, device='cpu') assert isinstance(tensor_masks, torch.Tensor) assert tensor_masks.shape == (3, 28, 28) assert (tensor_masks.numpy() == raw_masks).all() def test_bitmap_mask_index(): raw_masks = dummy_raw_bitmap_masks((3, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) assert (bitmap_masks[0].masks == raw_masks[0]).all() assert (bitmap_masks[range(2)].masks == raw_masks[range(2)]).all() def test_bitmap_mask_iter(): raw_masks = dummy_raw_bitmap_masks((3, 28, 28)) bitmap_masks = BitmapMasks(raw_masks, 28, 28) for i, bitmap_mask in enumerate(bitmap_masks): assert bitmap_mask.shape == (28, 28) assert (bitmap_mask == raw_masks[i]).all() def test_polygon_mask_init(): # init with empty masks raw_masks = [] polygon_masks = PolygonMasks(raw_masks, 28, 28) assert len(polygon_masks) == 0 assert polygon_masks.height == 28 assert polygon_masks.width == 28 # init with masks contain 3 instances raw_masks = dummy_raw_polygon_masks((3, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) assert isinstance(polygon_masks.masks, list) assert isinstance(polygon_masks.masks[0], list) assert isinstance(polygon_masks.masks[0][0], np.ndarray) assert len(polygon_masks) == 3 assert polygon_masks.height == 28 assert polygon_masks.width == 28 assert polygon_masks.to_ndarray().shape == (3, 28, 28) # init with raw masks of unsupported type with pytest.raises(AssertionError): raw_masks = [[[]]] PolygonMasks(raw_masks, 28, 28) raw_masks = [dummy_raw_polygon_masks((3, 28, 28))] PolygonMasks(raw_masks, 28, 28) def test_polygon_mask_rescale(): # rescale with empty polygon masks raw_masks = dummy_raw_polygon_masks((0, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) rescaled_masks = polygon_masks.rescale((56, 72)) assert len(rescaled_masks) == 0 assert rescaled_masks.height == 56 assert rescaled_masks.width == 56 assert rescaled_masks.to_ndarray().shape == (0, 56, 56) # rescale with polygon masks contain 3 instances raw_masks = [[np.array([1, 1, 3, 1, 4, 3, 2, 4, 1, 3], dtype=np.float)]] polygon_masks = PolygonMasks(raw_masks, 5, 5) rescaled_masks = polygon_masks.rescale((12, 10)) assert len(rescaled_masks) == 1 assert rescaled_masks.height == 10 assert rescaled_masks.width == 10 assert rescaled_masks.to_ndarray().shape == (1, 10, 10) truth = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 
1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], np.uint8) assert (rescaled_masks.to_ndarray() == truth).all() def test_polygon_mask_resize(): # resize with empty polygon masks raw_masks = dummy_raw_polygon_masks((0, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) resized_masks = polygon_masks.resize((56, 72)) assert len(resized_masks) == 0 assert resized_masks.height == 56 assert resized_masks.width == 72 assert resized_masks.to_ndarray().shape == (0, 56, 72) assert len(resized_masks.get_bboxes()) == 0 # resize with polygon masks contain 1 instance 1 part raw_masks1 = [[np.array([1, 1, 3, 1, 4, 3, 2, 4, 1, 3], dtype=np.float)]] polygon_masks1 = PolygonMasks(raw_masks1, 5, 5) resized_masks1 = polygon_masks1.resize((10, 10)) assert len(resized_masks1) == 1 assert resized_masks1.height == 10 assert resized_masks1.width == 10 assert resized_masks1.to_ndarray().shape == (1, 10, 10) truth1 = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], np.uint8) assert (resized_masks1.to_ndarray() == truth1).all() bboxes = resized_masks1.get_bboxes() bbox_truth = np.array([[2, 2, 8, 8]]) assert (bboxes == bbox_truth).all() # resize with polygon masks contain 1 instance 2 part raw_masks2 = [[ np.array([0., 0., 1., 0., 1., 1.]), np.array([1., 1., 2., 1., 2., 2., 1., 2.]) ]] polygon_masks2 = PolygonMasks(raw_masks2, 3, 3) resized_masks2 = polygon_masks2.resize((6, 6)) assert len(resized_masks2) == 1 assert resized_masks2.height == 6 assert resized_masks2.width == 6 assert resized_masks2.to_ndarray().shape == (1, 6, 6) truth2 = np.array( [[0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], np.uint8) assert (resized_masks2.to_ndarray() == truth2).all() # resize with polygon masks contain 2 instances raw_masks3 = [raw_masks1[0], raw_masks2[0]] polygon_masks3 = PolygonMasks(raw_masks3, 5, 5) resized_masks3 = polygon_masks3.resize((10, 10)) assert len(resized_masks3) == 2 assert resized_masks3.height == 10 assert resized_masks3.width == 10 assert resized_masks3.to_ndarray().shape == (2, 10, 10) truth3 = np.stack([truth1, np.pad(truth2, ((0, 4), (0, 4)), 'constant')]) assert (resized_masks3.to_ndarray() == truth3).all() # resize to non-square raw_masks4 = [[np.array([1, 1, 3, 1, 4, 3, 2, 4, 1, 3], dtype=np.float)]] polygon_masks4 = PolygonMasks(raw_masks4, 5, 5) resized_masks4 = polygon_masks4.resize((5, 10)) assert len(resized_masks4) == 1 assert resized_masks4.height == 5 assert resized_masks4.width == 10 assert resized_masks4.to_ndarray().shape == (1, 5, 10) truth4 = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], np.uint8) assert (resized_masks4.to_ndarray() == truth4).all() def test_polygon_mask_flip(): # flip with empty polygon masks raw_masks = dummy_raw_polygon_masks((0, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) flipped_masks = polygon_masks.flip(flip_direction='horizontal') assert len(flipped_masks) == 0 assert flipped_masks.height == 28 assert flipped_masks.width == 28 assert flipped_masks.to_ndarray().shape == (0, 28, 
28) # TODO: fixed flip correctness checking after v2.0_coord is merged # horizontally flip with polygon masks contain 3 instances raw_masks = dummy_raw_polygon_masks((3, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) flipped_masks = polygon_masks.flip(flip_direction='horizontal') flipped_flipped_masks = flipped_masks.flip(flip_direction='horizontal') assert len(flipped_masks) == 3 assert flipped_masks.height == 28 assert flipped_masks.width == 28 assert flipped_masks.to_ndarray().shape == (3, 28, 28) assert (polygon_masks.to_ndarray() == flipped_flipped_masks.to_ndarray() ).all() # vertically flip with polygon masks contain 3 instances raw_masks = dummy_raw_polygon_masks((3, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) flipped_masks = polygon_masks.flip(flip_direction='vertical') flipped_flipped_masks = flipped_masks.flip(flip_direction='vertical') assert len(flipped_masks) == 3 assert flipped_masks.height == 28 assert flipped_masks.width == 28 assert flipped_masks.to_ndarray().shape == (3, 28, 28) assert (polygon_masks.to_ndarray() == flipped_flipped_masks.to_ndarray() ).all() # diagonal flip with polygon masks contain 3 instances raw_masks = dummy_raw_polygon_masks((3, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) flipped_masks = polygon_masks.flip(flip_direction='diagonal') flipped_flipped_masks = flipped_masks.flip(flip_direction='diagonal') assert len(flipped_masks) == 3 assert flipped_masks.height == 28 assert flipped_masks.width == 28 assert flipped_masks.to_ndarray().shape == (3, 28, 28) assert (polygon_masks.to_ndarray() == flipped_flipped_masks.to_ndarray() ).all() def test_polygon_mask_crop(): dummy_bbox = np.array([0, 10, 10, 27], dtype=np.int) # crop with empty polygon masks raw_masks = dummy_raw_polygon_masks((0, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) cropped_masks = polygon_masks.crop(dummy_bbox) assert len(cropped_masks) == 0 assert cropped_masks.height == 17 assert cropped_masks.width == 10 assert cropped_masks.to_ndarray().shape == (0, 17, 10) # crop with polygon masks contain 1 instances raw_masks = [[np.array([1., 3., 5., 1., 5., 6., 1, 6])]] polygon_masks = PolygonMasks(raw_masks, 7, 7) bbox = np.array([0, 0, 3, 4]) cropped_masks = polygon_masks.crop(bbox) assert len(cropped_masks) == 1 assert cropped_masks.height == 4 assert cropped_masks.width == 3 assert cropped_masks.to_ndarray().shape == (1, 4, 3) truth = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 1], [0, 1, 1]]) assert (cropped_masks.to_ndarray() == truth).all() # crop with invalid bbox with pytest.raises(AssertionError): dummy_bbox = dummy_bboxes(2, 28, 28) polygon_masks.crop(dummy_bbox) def test_polygon_mask_pad(): # pad with empty polygon masks raw_masks = dummy_raw_polygon_masks((0, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) padded_masks = polygon_masks.pad((56, 56)) assert len(padded_masks) == 0 assert padded_masks.height == 56 assert padded_masks.width == 56 assert padded_masks.to_ndarray().shape == (0, 56, 56) # pad with polygon masks contain 3 instances raw_masks = dummy_raw_polygon_masks((3, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) padded_masks = polygon_masks.pad((56, 56)) assert len(padded_masks) == 3 assert padded_masks.height == 56 assert padded_masks.width == 56 assert padded_masks.to_ndarray().shape == (3, 56, 56) assert (padded_masks.to_ndarray()[:, 28:, 28:] == 0).all() def test_polygon_mask_expand(): with pytest.raises(NotImplementedError): raw_masks = dummy_raw_polygon_masks((0, 28, 28)) polygon_masks = 
PolygonMasks(raw_masks, 28, 28) polygon_masks.expand(56, 56, 10, 17) def test_polygon_mask_crop_and_resize(): dummy_bbox = dummy_bboxes(5, 28, 28) inds = np.random.randint(0, 3, (5, )) # crop and resize with empty polygon masks raw_masks = dummy_raw_polygon_masks((0, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) cropped_resized_masks = polygon_masks.crop_and_resize( dummy_bbox, (56, 56), inds) assert len(cropped_resized_masks) == 0 assert cropped_resized_masks.height == 56 assert cropped_resized_masks.width == 56 assert cropped_resized_masks.to_ndarray().shape == (0, 56, 56) # crop and resize with polygon masks contain 3 instances raw_masks = dummy_raw_polygon_masks((3, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) cropped_resized_masks = polygon_masks.crop_and_resize( dummy_bbox, (56, 56), inds) assert len(cropped_resized_masks) == 5 assert cropped_resized_masks.height == 56 assert cropped_resized_masks.width == 56 assert cropped_resized_masks.to_ndarray().shape == (5, 56, 56) def test_polygon_mask_area(): # area of empty polygon masks raw_masks = dummy_raw_polygon_masks((0, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) assert polygon_masks.areas.sum() == 0 # area of polygon masks contain 1 instance # here we hack a case that the gap between the area of bitmap and polygon # is minor raw_masks = [[np.array([1, 1, 5, 1, 3, 4])]] polygon_masks = PolygonMasks(raw_masks, 6, 6) polygon_area = polygon_masks.areas bitmap_area = polygon_masks.to_bitmap().areas assert len(polygon_area) == 1 assert np.isclose(polygon_area, bitmap_area).all() def test_polygon_mask_to_bitmap(): # polygon masks contain 3 instances to bitmap raw_masks = dummy_raw_polygon_masks((3, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) bitmap_masks = polygon_masks.to_bitmap() assert (polygon_masks.to_ndarray() == bitmap_masks.to_ndarray()).all() def test_polygon_mask_to_ndarray(): # empty polygon masks to ndarray raw_masks = dummy_raw_polygon_masks((0, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) ndarray_masks = polygon_masks.to_ndarray() assert isinstance(ndarray_masks, np.ndarray) assert ndarray_masks.shape == (0, 28, 28) # polygon masks contain 3 instances to ndarray raw_masks = dummy_raw_polygon_masks((3, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) ndarray_masks = polygon_masks.to_ndarray() assert isinstance(ndarray_masks, np.ndarray) assert ndarray_masks.shape == (3, 28, 28) def test_polygon_to_tensor(): # empty polygon masks to tensor raw_masks = dummy_raw_polygon_masks((0, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) tensor_masks = polygon_masks.to_tensor(dtype=torch.uint8, device='cpu') assert isinstance(tensor_masks, torch.Tensor) assert tensor_masks.shape == (0, 28, 28) # polygon masks contain 3 instances to tensor raw_masks = dummy_raw_polygon_masks((3, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) tensor_masks = polygon_masks.to_tensor(dtype=torch.uint8, device='cpu') assert isinstance(tensor_masks, torch.Tensor) assert tensor_masks.shape == (3, 28, 28) assert (tensor_masks.numpy() == polygon_masks.to_ndarray()).all() def test_polygon_mask_index(): raw_masks = dummy_raw_polygon_masks((3, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) # index by integer polygon_masks[0] # index by list polygon_masks[[0, 1]] # index by ndarray polygon_masks[np.asarray([0, 1])] with pytest.raises(ValueError): # invalid index polygon_masks[torch.Tensor([1, 2])] def test_polygon_mask_iter(): raw_masks = dummy_raw_polygon_masks((3, 28, 28)) 
polygon_masks = PolygonMasks(raw_masks, 28, 28) for i, polygon_mask in enumerate(polygon_masks): assert np.equal(polygon_mask, raw_masks[i]).all() def test_mask2bbox(): # no instance masks = torch.zeros((1, 20, 15), dtype=torch.bool) bboxes_empty_gt = torch.tensor([[0, 0, 0, 0]]).float() bboxes = mask2bbox(masks) assert torch.allclose(bboxes_empty_gt.float(), bboxes) # the entire mask is an instance bboxes_full_gt = torch.tensor([[0, 0, 15, 20]]).float() masks = torch.ones((1, 20, 15), dtype=torch.bool) bboxes = mask2bbox(masks) assert torch.allclose(bboxes_full_gt, bboxes) # a pentagon-shaped instance bboxes_gt = torch.tensor([[2, 2, 7, 6]]).float() masks = torch.zeros((1, 20, 15), dtype=torch.bool) masks[0, 2, 4] = True masks[0, 3, 3:6] = True masks[0, 4, 2:7] = True masks[0, 5, 2:7] = True bboxes = mask2bbox(masks) assert torch.allclose(bboxes_gt, bboxes) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_utils/test_memory.py ================================================ import numpy as np import pytest import torch from mmdet.utils import AvoidOOM from mmdet.utils.memory import cast_tensor_type def test_avoidoom(): tensor = torch.from_numpy(np.random.random((20, 20))) if torch.cuda.is_available(): tensor = tensor.cuda() # get default result default_result = torch.mm(tensor, tensor.transpose(1, 0)) # when not occurred OOM error AvoidCudaOOM = AvoidOOM() result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor, tensor.transpose( 1, 0)) assert default_result.device == result.device and \ default_result.dtype == result.dtype and \ torch.equal(default_result, result) # calculate with fp16 and convert back to source type AvoidCudaOOM = AvoidOOM(test=True) result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor, tensor.transpose( 1, 0)) assert default_result.device == result.device and \ default_result.dtype == result.dtype and \ torch.allclose(default_result, result, 1e-3) # calculate on cpu and convert back to source device AvoidCudaOOM = AvoidOOM(test=True) result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor, tensor.transpose( 1, 0)) assert result.dtype == default_result.dtype and \ result.device == default_result.device and \ torch.allclose(default_result, result) # do not calculate on cpu and the outputs will be same as input AvoidCudaOOM = AvoidOOM(test=True, to_cpu=False) result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor, tensor.transpose( 1, 0)) assert result.dtype == default_result.dtype and \ result.device == default_result.device else: default_result = torch.mm(tensor, tensor.transpose(1, 0)) AvoidCudaOOM = AvoidOOM() result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor, tensor.transpose( 1, 0)) assert default_result.device == result.device and \ default_result.dtype == result.dtype and \ torch.equal(default_result, result) def test_cast_tensor_type(): inputs = torch.rand(10) if torch.cuda.is_available(): inputs = inputs.cuda() with pytest.raises(AssertionError): cast_tensor_type(inputs, src_type=None, dst_type=None) # input is a float out = cast_tensor_type(10., dst_type=torch.half) assert out == 10. 
and isinstance(out, float) # convert Tensor to fp16 and re-convert to fp32 fp16_out = cast_tensor_type(inputs, dst_type=torch.half) assert fp16_out.dtype == torch.half fp32_out = cast_tensor_type(fp16_out, dst_type=torch.float32) assert fp32_out.dtype == torch.float32 # input is a list list_input = [inputs, inputs] list_outs = cast_tensor_type(list_input, dst_type=torch.half) assert len(list_outs) == len(list_input) and \ isinstance(list_outs, list) for out in list_outs: assert out.dtype == torch.half # input is a dict dict_input = {'test1': inputs, 'test2': inputs} dict_outs = cast_tensor_type(dict_input, dst_type=torch.half) assert len(dict_outs) == len(dict_input) and \ isinstance(dict_outs, dict) # convert the input tensor to CPU and re-convert to GPU if torch.cuda.is_available(): cpu_device = torch.empty(0).device gpu_device = inputs.device cpu_out = cast_tensor_type(inputs, dst_type=cpu_device) assert cpu_out.device == cpu_device gpu_out = cast_tensor_type(inputs, dst_type=gpu_device) assert gpu_out.device == gpu_device ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_utils/test_misc.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import tempfile import numpy as np import pytest import torch from mmdet.core.bbox import distance2bbox from mmdet.core.mask.structures import BitmapMasks, PolygonMasks from mmdet.core.utils import (center_of_mass, filter_scores_and_topk, flip_tensor, mask2ndarray, select_single_mlvl) from mmdet.utils import find_latest_checkpoint def dummy_raw_polygon_masks(size): """ Args: size (tuple): expected shape of dummy masks, (N, H, W) Return: list[list[ndarray]]: dummy mask """ num_obj, height, width = size polygons = [] for _ in range(num_obj): num_points = np.random.randint(5) * 2 + 6 polygons.append([np.random.uniform(0, min(height, width), num_points)]) return polygons def test_mask2ndarray(): raw_masks = np.ones((3, 28, 28)) bitmap_mask = BitmapMasks(raw_masks, 28, 28) output_mask = mask2ndarray(bitmap_mask) assert np.allclose(raw_masks, output_mask) raw_masks = dummy_raw_polygon_masks((3, 28, 28)) polygon_masks = PolygonMasks(raw_masks, 28, 28) output_mask = mask2ndarray(polygon_masks) assert output_mask.shape == (3, 28, 28) raw_masks = np.ones((3, 28, 28)) output_mask = mask2ndarray(raw_masks) assert np.allclose(raw_masks, output_mask) raw_masks = torch.ones((3, 28, 28)) output_mask = mask2ndarray(raw_masks) assert np.allclose(raw_masks, output_mask) # test unsupported type raw_masks = [] with pytest.raises(TypeError): output_mask = mask2ndarray(raw_masks) def test_distance2bbox(): point = torch.Tensor([[74., 61.], [-29., 106.], [138., 61.], [29., 170.]]) distance = torch.Tensor([[0., 0, 1., 1.], [1., 2., 10., 6.], [22., -29., 138., 61.], [54., -29., 170., 61.]]) expected_decode_bboxes = torch.Tensor([[74., 61., 75., 62.], [0., 104., 0., 112.], [100., 90., 100., 120.], [0., 120., 100., 120.]]) out_bbox = distance2bbox(point, distance, max_shape=(120, 100)) assert expected_decode_bboxes.allclose(out_bbox) out = distance2bbox(point, distance, max_shape=torch.Tensor((120, 100))) assert expected_decode_bboxes.allclose(out) batch_point = point.unsqueeze(0).repeat(2, 1, 1) batch_distance = distance.unsqueeze(0).repeat(2, 1, 1) batch_out = distance2bbox( batch_point, batch_distance, max_shape=(120, 100))[0] assert out.allclose(batch_out) batch_out = distance2bbox( batch_point, batch_distance, max_shape=[(120, 100), (120, 100)])[0] assert 
out.allclose(batch_out) batch_out = distance2bbox(point, batch_distance, max_shape=(120, 100))[0] assert out.allclose(batch_out) # test max_shape is not equal to batch with pytest.raises(AssertionError): distance2bbox( batch_point, batch_distance, max_shape=[(120, 100), (120, 100), (32, 32)]) rois = torch.zeros((0, 4)) deltas = torch.zeros((0, 4)) out = distance2bbox(rois, deltas, max_shape=(120, 100)) assert rois.shape == out.shape rois = torch.zeros((2, 0, 4)) deltas = torch.zeros((2, 0, 4)) out = distance2bbox(rois, deltas, max_shape=(120, 100)) assert rois.shape == out.shape @pytest.mark.parametrize('mask', [ torch.ones((28, 28)), torch.zeros((28, 28)), torch.rand(28, 28) > 0.5, torch.tensor([[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]]) ]) def test_center_of_mass(mask): center_h, center_w = center_of_mass(mask) if mask.shape[0] == 4: assert center_h == 1.5 assert center_w == 1.5 assert isinstance(center_h, torch.Tensor) \ and isinstance(center_w, torch.Tensor) assert 0 <= center_h <= 28 \ and 0 <= center_w <= 28 def test_flip_tensor(): img = np.random.random((1, 3, 10, 10)) src_tensor = torch.from_numpy(img) # test flip_direction parameter error with pytest.raises(AssertionError): flip_tensor(src_tensor, 'flip') # test tensor dimension with pytest.raises(AssertionError): flip_tensor(src_tensor[0], 'vertical') hflip_tensor = flip_tensor(src_tensor, 'horizontal') expected_hflip_tensor = torch.from_numpy(img[..., ::-1].copy()) assert expected_hflip_tensor.allclose(hflip_tensor) vflip_tensor = flip_tensor(src_tensor, 'vertical') expected_vflip_tensor = torch.from_numpy(img[..., ::-1, :].copy()) assert expected_vflip_tensor.allclose(vflip_tensor) diag_flip_tensor = flip_tensor(src_tensor, 'diagonal') expected_diag_flip_tensor = torch.from_numpy(img[..., ::-1, ::-1].copy()) assert expected_diag_flip_tensor.allclose(diag_flip_tensor) def test_select_single_mlvl(): mlvl_tensors = [torch.rand(2, 1, 10, 10)] * 5 mlvl_tensor_list = select_single_mlvl(mlvl_tensors, 1) assert len(mlvl_tensor_list) == 5 and mlvl_tensor_list[0].ndim == 3 def test_filter_scores_and_topk(): score = torch.tensor([[0.1, 0.3, 0.2], [0.12, 0.7, 0.9], [0.02, 0.8, 0.08], [0.4, 0.1, 0.08]]) bbox_pred = torch.tensor([[0.2, 0.3], [0.4, 0.7], [0.1, 0.1], [0.5, 0.1]]) score_thr = 0.15 nms_pre = 4 # test results type error with pytest.raises(NotImplementedError): filter_scores_and_topk(score, score_thr, nms_pre, (score, )) filtered_results = filter_scores_and_topk( score, score_thr, nms_pre, results=dict(bbox_pred=bbox_pred)) filtered_score, labels, keep_idxs, results = filtered_results assert filtered_score.allclose(torch.tensor([0.9, 0.8, 0.7, 0.4])) assert labels.allclose(torch.tensor([2, 1, 1, 0])) assert keep_idxs.allclose(torch.tensor([1, 2, 1, 3])) assert results['bbox_pred'].allclose( torch.tensor([[0.4, 0.7], [0.1, 0.1], [0.4, 0.7], [0.5, 0.1]])) def test_find_latest_checkpoint(): with tempfile.TemporaryDirectory() as tmpdir: path = tmpdir latest = find_latest_checkpoint(path) # There are no checkpoints in the path. assert latest is None path = osp.join(tmpdir, 'none') latest = find_latest_checkpoint(path) # The path does not exist. 
assert latest is None with tempfile.TemporaryDirectory() as tmpdir: with open(osp.join(tmpdir, 'latest.pth'), 'w') as f: f.write('latest') path = tmpdir latest = find_latest_checkpoint(path) assert latest == osp.join(tmpdir, 'latest.pth') with tempfile.TemporaryDirectory() as tmpdir: with open(osp.join(tmpdir, 'iter_4000.pth'), 'w') as f: f.write('iter_4000') with open(osp.join(tmpdir, 'iter_8000.pth'), 'w') as f: f.write('iter_8000') path = tmpdir latest = find_latest_checkpoint(path) assert latest == osp.join(tmpdir, 'iter_8000.pth') with tempfile.TemporaryDirectory() as tmpdir: with open(osp.join(tmpdir, 'epoch_1.pth'), 'w') as f: f.write('epoch_1') with open(osp.join(tmpdir, 'epoch_2.pth'), 'w') as f: f.write('epoch_2') path = tmpdir latest = find_latest_checkpoint(path) assert latest == osp.join(tmpdir, 'epoch_2.pth') ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_utils/test_nms.py ================================================ import pytest import torch from mmdet.core.post_processing import mask_matrix_nms def _create_mask(N, h, w): masks = torch.rand((N, h, w)) > 0.5 labels = torch.rand(N) scores = torch.rand(N) return masks, labels, scores def test_nms_input_errors(): with pytest.raises(AssertionError): mask_matrix_nms( torch.rand((10, 28, 28)), torch.rand(11), torch.rand(11)) with pytest.raises(AssertionError): masks = torch.rand((10, 28, 28)) mask_matrix_nms( masks, torch.rand(11), torch.rand(11), mask_area=masks.sum((1, 2)).float()[:8]) with pytest.raises(NotImplementedError): mask_matrix_nms( torch.rand((10, 28, 28)), torch.rand(10), torch.rand(10), kernel='None') # test an empty results masks, labels, scores = _create_mask(0, 28, 28) score, label, mask, keep_ind = \ mask_matrix_nms(masks, labels, scores) assert len(score) == len(label) == \ len(mask) == len(keep_ind) == 0 # do not use update_thr, nms_pre and max_num masks, labels, scores = _create_mask(1000, 28, 28) score, label, mask, keep_ind = \ mask_matrix_nms(masks, labels, scores) assert len(score) == len(label) == \ len(mask) == len(keep_ind) == 1000 # only use nms_pre score, label, mask, keep_ind = \ mask_matrix_nms(masks, labels, scores, nms_pre=500) assert len(score) == len(label) == \ len(mask) == len(keep_ind) == 500 # use max_num score, label, mask, keep_ind = \ mask_matrix_nms(masks, labels, scores, nms_pre=500, max_num=100) assert len(score) == len(label) == \ len(mask) == len(keep_ind) == 100 masks, labels, _ = _create_mask(1, 28, 28) scores = torch.Tensor([1.0]) masks = masks.expand(1000, 28, 28) labels = labels.expand(1000) scores = scores.expand(1000) # assert scores is decayed and update_thr is worked # if with the same mask, label, and all scores = 1 # the first score will set to 1, others will decay. 
score, label, mask, keep_ind = \ mask_matrix_nms(masks, labels, scores, nms_pre=500, max_num=100, kernel='gaussian', sigma=2.0, filter_thr=0.5) assert len(score) == 1 assert score[0] == 1 ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_utils/test_replace_cfg_vals.py ================================================ import os.path as osp import tempfile from copy import deepcopy import pytest from mmcv.utils import Config from mmdet.utils import replace_cfg_vals def test_replace_cfg_vals(): temp_file = tempfile.NamedTemporaryFile() cfg_path = f'{temp_file.name}.py' with open(cfg_path, 'w') as f: f.write('configs') ori_cfg_dict = dict() ori_cfg_dict['cfg_name'] = osp.basename(temp_file.name) ori_cfg_dict['work_dir'] = 'work_dirs/${cfg_name}/${percent}/${fold}' ori_cfg_dict['percent'] = 5 ori_cfg_dict['fold'] = 1 ori_cfg_dict['model_wrapper'] = dict( type='SoftTeacher', detector='${model}') ori_cfg_dict['model'] = dict( type='FasterRCNN', backbone=dict(type='ResNet'), neck=dict(type='FPN'), rpn_head=dict(type='RPNHead'), roi_head=dict(type='StandardRoIHead'), train_cfg=dict( rpn=dict( assigner=dict(type='MaxIoUAssigner'), sampler=dict(type='RandomSampler'), ), rpn_proposal=dict(nms=dict(type='nms', iou_threshold=0.7)), rcnn=dict( assigner=dict(type='MaxIoUAssigner'), sampler=dict(type='RandomSampler'), ), ), test_cfg=dict( rpn=dict(nms=dict(type='nms', iou_threshold=0.7)), rcnn=dict(nms=dict(type='nms', iou_threshold=0.5)), ), ) ori_cfg_dict['iou_threshold'] = dict( rpn_proposal_nms='${model.train_cfg.rpn_proposal.nms.iou_threshold}', test_rpn_nms='${model.test_cfg.rpn.nms.iou_threshold}', test_rcnn_nms='${model.test_cfg.rcnn.nms.iou_threshold}', ) ori_cfg_dict['str'] = 'Hello, world!' ori_cfg_dict['dict'] = {'Hello': 'world!'} ori_cfg_dict['list'] = [ 'Hello, world!', ] ori_cfg_dict['tuple'] = ('Hello, world!', ) ori_cfg_dict['test_str'] = 'xxx${str}xxx' ori_cfg = Config(ori_cfg_dict, filename=cfg_path) updated_cfg = replace_cfg_vals(deepcopy(ori_cfg)) assert updated_cfg.work_dir \ == f'work_dirs/{osp.basename(temp_file.name)}/5/1' assert updated_cfg.model.detector == ori_cfg.model assert updated_cfg.iou_threshold.rpn_proposal_nms \ == ori_cfg.model.train_cfg.rpn_proposal.nms.iou_threshold assert updated_cfg.test_str == 'xxxHello, world!xxx' ori_cfg_dict['test_dict'] = 'xxx${dict}xxx' ori_cfg_dict['test_list'] = 'xxx${list}xxx' ori_cfg_dict['test_tuple'] = 'xxx${tuple}xxx' with pytest.raises(AssertionError): cfg = deepcopy(ori_cfg) cfg['test_dict'] = 'xxx${dict}xxx' updated_cfg = replace_cfg_vals(cfg) with pytest.raises(AssertionError): cfg = deepcopy(ori_cfg) cfg['test_list'] = 'xxx${list}xxx' updated_cfg = replace_cfg_vals(cfg) with pytest.raises(AssertionError): cfg = deepcopy(ori_cfg) cfg['test_tuple'] = 'xxx${tuple}xxx' updated_cfg = replace_cfg_vals(cfg) ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_utils/test_setup_env.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
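# --- Editor's note (not part of the original file): a minimal sketch of how
# setup_multi_processes is driven from a config, mirroring the test below;
# the helper name is hypothetical, and mmcv/mmdet are assumed importable.
def _example_setup_multi_processes():
    from mmcv import Config
    from mmdet.utils import setup_multi_processes
    cfg = Config(
        dict(
            data=dict(workers_per_gpu=2),
            opencv_num_threads=4,
            mp_start_method='spawn'))
    # sets the OMP/MKL thread env vars, the cv2 thread count and the
    # multiprocessing start method as configured
    setup_multi_processes(cfg)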
import multiprocessing as mp import os import platform import cv2 from mmcv import Config from mmdet.utils import setup_multi_processes def test_setup_multi_processes(): # temp save system setting sys_start_mehod = mp.get_start_method(allow_none=True) sys_cv_threads = cv2.getNumThreads() # pop and temp save system env vars sys_omp_threads = os.environ.pop('OMP_NUM_THREADS', default=None) sys_mkl_threads = os.environ.pop('MKL_NUM_THREADS', default=None) # test config without setting env config = dict(data=dict(workers_per_gpu=2)) cfg = Config(config) setup_multi_processes(cfg) assert os.getenv('OMP_NUM_THREADS') == '1' assert os.getenv('MKL_NUM_THREADS') == '1' # when set to 0, the num threads will be 1 assert cv2.getNumThreads() == 1 if platform.system() != 'Windows': assert mp.get_start_method() == 'fork' # test num workers <= 1 os.environ.pop('OMP_NUM_THREADS') os.environ.pop('MKL_NUM_THREADS') config = dict(data=dict(workers_per_gpu=0)) cfg = Config(config) setup_multi_processes(cfg) assert 'OMP_NUM_THREADS' not in os.environ assert 'MKL_NUM_THREADS' not in os.environ # test manually set env var os.environ['OMP_NUM_THREADS'] = '4' config = dict(data=dict(workers_per_gpu=2)) cfg = Config(config) setup_multi_processes(cfg) assert os.getenv('OMP_NUM_THREADS') == '4' # test manually set opencv threads and mp start method config = dict( data=dict(workers_per_gpu=2), opencv_num_threads=4, mp_start_method='spawn') cfg = Config(config) setup_multi_processes(cfg) assert cv2.getNumThreads() == 4 assert mp.get_start_method() == 'spawn' # revert setting to avoid affecting other programs if sys_start_mehod: mp.set_start_method(sys_start_mehod, force=True) cv2.setNumThreads(sys_cv_threads) if sys_omp_threads: os.environ['OMP_NUM_THREADS'] = sys_omp_threads else: os.environ.pop('OMP_NUM_THREADS') if sys_mkl_threads: os.environ['MKL_NUM_THREADS'] = sys_mkl_threads else: os.environ.pop('MKL_NUM_THREADS') ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_utils/test_split_batch.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
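# --- Editor's note (not part of the original file): a minimal sketch of the
# split_batch call shape that the test below builds up in full. Metas carrying
# only a 'tag' key are assumed to be sufficient for the grouping, which may
# not hold for every mmdet version; the helper name is hypothetical.
def _example_split_batch():
    import torch
    from mmdet.utils import split_batch
    imgs = torch.zeros(3, 3, 8, 8)
    img_metas = [
        dict(tag='sup'),
        dict(tag='unsup_teacher'),
        dict(tag='unsup_student')
    ]
    kwargs = dict(gt_bboxes=[torch.zeros(0, 4)] * 3)
    data_groups = split_batch(imgs, img_metas, kwargs)
    return sorted(data_groups)  # ['sup', 'unsup_student', 'unsup_teacher']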
import os.path as osp from copy import deepcopy import mmcv import numpy as np import torch from mmdet.utils import split_batch def test_split_batch(): img_root = osp.join(osp.dirname(__file__), '../data/color.jpg') img = mmcv.imread(img_root, 'color') h, w, _ = img.shape gt_bboxes = np.array([[0.2 * w, 0.2 * h, 0.4 * w, 0.4 * h], [0.6 * w, 0.6 * h, 0.8 * w, 0.8 * h]], dtype=np.float32) gt_lables = np.ones(gt_bboxes.shape[0], dtype=np.int64) img = torch.tensor(img).permute(2, 0, 1) meta = dict() meta['filename'] = img_root meta['ori_shape'] = img.shape meta['img_shape'] = img.shape meta['img_norm_cfg'] = { 'mean': np.array([103.53, 116.28, 123.675], dtype=np.float32), 'std': np.array([1., 1., 1.], dtype=np.float32), 'to_rgb': False } meta['pad_shape'] = img.shape # For example, tag include sup, unsup_teacher and unsup_student, # in order to distinguish the difference between the three groups of data, # the scale_factor of sup is [0.5, 0.5, 0.5, 0.5] # the scale_factor of unsup_teacher is [1.0, 1.0, 1.0, 1.0] # the scale_factor of unsup_student is [2.0, 2.0, 2.0, 2.0] imgs = img.unsqueeze(0).repeat(9, 1, 1, 1) img_metas = [] tags = [ 'sup', 'unsup_teacher', 'unsup_student', 'unsup_teacher', 'unsup_student', 'unsup_teacher', 'unsup_student', 'unsup_teacher', 'unsup_student' ] for tag in tags: img_meta = deepcopy(meta) if tag == 'sup': img_meta['scale_factor'] = [0.5, 0.5, 0.5, 0.5] img_meta['tag'] = 'sup' elif tag == 'unsup_teacher': img_meta['scale_factor'] = [1.0, 1.0, 1.0, 1.0] img_meta['tag'] = 'unsup_teacher' elif tag == 'unsup_student': img_meta['scale_factor'] = [2.0, 2.0, 2.0, 2.0] img_meta['tag'] = 'unsup_student' else: continue img_metas.append(img_meta) kwargs = dict() kwargs['gt_bboxes'] = [torch.tensor(gt_bboxes)] + [torch.zeros(0, 4)] * 8 kwargs['gt_lables'] = [torch.tensor(gt_lables)] + [torch.zeros(0, )] * 8 data_groups = split_batch(imgs, img_metas, kwargs) assert set(data_groups.keys()) == set(tags) assert data_groups['sup']['img'].shape == (1, 3, h, w) assert data_groups['unsup_teacher']['img'].shape == (4, 3, h, w) assert data_groups['unsup_student']['img'].shape == (4, 3, h, w) # the scale_factor of sup is [0.5, 0.5, 0.5, 0.5] assert data_groups['sup']['img_metas'][0]['scale_factor'] == [ 0.5, 0.5, 0.5, 0.5 ] # the scale_factor of unsup_teacher is [1.0, 1.0, 1.0, 1.0] assert data_groups['unsup_teacher']['img_metas'][0]['scale_factor'] == [ 1.0, 1.0, 1.0, 1.0 ] assert data_groups['unsup_teacher']['img_metas'][1]['scale_factor'] == [ 1.0, 1.0, 1.0, 1.0 ] assert data_groups['unsup_teacher']['img_metas'][2]['scale_factor'] == [ 1.0, 1.0, 1.0, 1.0 ] assert data_groups['unsup_teacher']['img_metas'][3]['scale_factor'] == [ 1.0, 1.0, 1.0, 1.0 ] # the scale_factor of unsup_student is [2.0, 2.0, 2.0, 2.0] assert data_groups['unsup_student']['img_metas'][0]['scale_factor'] == [ 2.0, 2.0, 2.0, 2.0 ] assert data_groups['unsup_student']['img_metas'][1]['scale_factor'] == [ 2.0, 2.0, 2.0, 2.0 ] assert data_groups['unsup_student']['img_metas'][2]['scale_factor'] == [ 2.0, 2.0, 2.0, 2.0 ] assert data_groups['unsup_student']['img_metas'][3]['scale_factor'] == [ 2.0, 2.0, 2.0, 2.0 ] ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_utils/test_version.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
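# --- Editor's note (not part of the original file): a minimal sketch of the
# ordering digit_version implements; release candidates sort below the final
# release, as the assertions below spell out exhaustively.
def _example_digit_version():
    from mmdet import digit_version
    assert digit_version('1.0.0rc1') < digit_version('1.0.0')
    assert digit_version('0.9.9') < digit_version('1.0.0')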
from mmdet import digit_version def test_version_check(): assert digit_version('1.0.5') > digit_version('1.0.5rc0') assert digit_version('1.0.5') > digit_version('1.0.4rc0') assert digit_version('1.0.5') > digit_version('1.0rc0') assert digit_version('1.0.0') > digit_version('0.6.2') assert digit_version('1.0.0') > digit_version('0.2.16') assert digit_version('1.0.5rc0') > digit_version('1.0.0rc0') assert digit_version('1.0.0rc1') > digit_version('1.0.0rc0') assert digit_version('1.0.0rc2') > digit_version('1.0.0rc0') assert digit_version('1.0.0rc2') > digit_version('1.0.0rc1') assert digit_version('1.0.1rc1') > digit_version('1.0.0rc1') assert digit_version('1.0.0') > digit_version('1.0.0rc1') ================================================ FILE: DLTA_AI_app/mmdetection/tests/test_utils/test_visualization.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import os import os.path as osp import tempfile import mmcv import numpy as np import pytest import torch from mmdet.core import visualization as vis from mmdet.datasets import (CityscapesDataset, CocoDataset, CocoPanopticDataset, VOCDataset) def test_color(): assert vis.color_val_matplotlib(mmcv.Color.blue) == (0., 0., 1.) assert vis.color_val_matplotlib('green') == (0., 1., 0.) assert vis.color_val_matplotlib((1, 2, 3)) == (3 / 255, 2 / 255, 1 / 255) assert vis.color_val_matplotlib(100) == (100 / 255, 100 / 255, 100 / 255) assert vis.color_val_matplotlib(np.zeros(3, dtype=np.int)) == (0., 0., 0.) # forbid white color with pytest.raises(TypeError): vis.color_val_matplotlib([255, 255, 255]) # forbid float with pytest.raises(TypeError): vis.color_val_matplotlib(1.0) # overflowed with pytest.raises(AssertionError): vis.color_val_matplotlib((0, 0, 500)) def test_imshow_det_bboxes(): tmp_filename = osp.join(tempfile.gettempdir(), 'det_bboxes_image', 'image.jpg') image = np.ones((10, 10, 3), np.uint8) bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]]) label = np.array([0, 1]) out_image = vis.imshow_det_bboxes( image, bbox, label, out_file=tmp_filename, show=False) assert osp.isfile(tmp_filename) assert image.shape == out_image.shape assert not np.allclose(image, out_image) os.remove(tmp_filename) # test grayscale images image = np.ones((10, 10), np.uint8) bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]]) label = np.array([0, 1]) out_image = vis.imshow_det_bboxes( image, bbox, label, out_file=tmp_filename, show=False) assert osp.isfile(tmp_filename) assert image.shape == out_image.shape[:2] os.remove(tmp_filename) # test shaped (0,) image = np.ones((10, 10, 3), np.uint8) bbox = np.ones((0, 4)) label = np.ones((0, )) vis.imshow_det_bboxes( image, bbox, label, out_file=tmp_filename, show=False) assert osp.isfile(tmp_filename) os.remove(tmp_filename) # test mask image = np.ones((10, 10, 3), np.uint8) bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]]) label = np.array([0, 1]) segms = np.random.random((2, 10, 10)) > 0.5 segms = np.array(segms, np.int32) vis.imshow_det_bboxes( image, bbox, label, segms, out_file=tmp_filename, show=False) assert osp.isfile(tmp_filename) os.remove(tmp_filename) # test tensor mask type error with pytest.raises(AttributeError): segms = torch.tensor(segms) vis.imshow_det_bboxes(image, bbox, label, segms, show=False) def test_imshow_gt_det_bboxes(): tmp_filename = osp.join(tempfile.gettempdir(), 'det_bboxes_image', 'image.jpg') image = np.ones((10, 10, 3), np.uint8) bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]]) label = np.array([0, 1]) annotation = dict(gt_bboxes=bbox, gt_labels=label) 
det_result = np.array([[2, 1, 3, 3, 0], [3, 4, 6, 6, 1]]) result = [det_result] out_image = vis.imshow_gt_det_bboxes( image, annotation, result, out_file=tmp_filename, show=False) assert osp.isfile(tmp_filename) assert image.shape == out_image.shape assert not np.allclose(image, out_image) os.remove(tmp_filename) # test grayscale images image = np.ones((10, 10), np.uint8) bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]]) label = np.array([0, 1]) annotation = dict(gt_bboxes=bbox, gt_labels=label) det_result = np.array([[2, 1, 3, 3, 0], [3, 4, 6, 6, 1]]) result = [det_result] vis.imshow_gt_det_bboxes( image, annotation, result, out_file=tmp_filename, show=False) assert osp.isfile(tmp_filename) os.remove(tmp_filename) # test numpy mask gt_mask = np.ones((2, 10, 10)) annotation['gt_masks'] = gt_mask vis.imshow_gt_det_bboxes( image, annotation, result, out_file=tmp_filename, show=False) assert osp.isfile(tmp_filename) os.remove(tmp_filename) # test tensor mask gt_mask = torch.ones((2, 10, 10)) annotation['gt_masks'] = gt_mask vis.imshow_gt_det_bboxes( image, annotation, result, out_file=tmp_filename, show=False) assert osp.isfile(tmp_filename) os.remove(tmp_filename) # test unsupported type annotation['gt_masks'] = [] with pytest.raises(TypeError): vis.imshow_gt_det_bboxes(image, annotation, result, show=False) def test_palette(): assert vis.palette_val([(1, 2, 3)])[0] == (1 / 255, 2 / 255, 3 / 255) # test list palette = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] palette_ = vis.get_palette(palette, 3) for color, color_ in zip(palette, palette_): assert color == color_ # test tuple palette = vis.get_palette((1, 2, 3), 3) assert len(palette) == 3 for color in palette: assert color == (1, 2, 3) # test color str palette = vis.get_palette('red', 3) assert len(palette) == 3 for color in palette: assert color == (255, 0, 0) # test dataset str palette = vis.get_palette('coco', len(CocoDataset.CLASSES)) assert len(palette) == len(CocoDataset.CLASSES) assert palette[0] == (220, 20, 60) palette = vis.get_palette('coco', len(CocoPanopticDataset.CLASSES)) assert len(palette) == len(CocoPanopticDataset.CLASSES) assert palette[-1] == (250, 141, 255) palette = vis.get_palette('voc', len(VOCDataset.CLASSES)) assert len(palette) == len(VOCDataset.CLASSES) assert palette[0] == (106, 0, 228) palette = vis.get_palette('citys', len(CityscapesDataset.CLASSES)) assert len(palette) == len(CityscapesDataset.CLASSES) assert palette[0] == (220, 20, 60) # test random palette1 = vis.get_palette('random', 3) palette2 = vis.get_palette(None, 3) for color1, color2 in zip(palette1, palette2): assert isinstance(color1, tuple) assert isinstance(color2, tuple) assert color1 == color2 ================================================ FILE: DLTA_AI_app/mmdetection/tools/analysis_tools/analyze_logs.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
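# --- Editor's note (not part of the original file): typical invocations of
# this script, as a sketch with illustrative log paths; the two sub-commands
# and their flags are defined by the argument parsers below.
#   python tools/analysis_tools/analyze_logs.py plot_curve run.log.json \
#       --keys bbox_mAP --out curve.png
#   python tools/analysis_tools/analyze_logs.py cal_train_time run.log.json \
#       --include-outliers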
import argparse import json from collections import defaultdict import matplotlib.pyplot as plt import numpy as np import seaborn as sns def cal_train_time(log_dicts, args): for i, log_dict in enumerate(log_dicts): print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}') all_times = [] for epoch in log_dict.keys(): if args.include_outliers: all_times.append(log_dict[epoch]['time']) else: all_times.append(log_dict[epoch]['time'][1:]) if not all_times: raise KeyError( 'Please reduce the log interval in the config so that' 'interval is less than iterations of one epoch.') all_times = np.array(all_times) epoch_ave_time = all_times.mean(-1) slowest_epoch = epoch_ave_time.argmax() fastest_epoch = epoch_ave_time.argmin() std_over_epoch = epoch_ave_time.std() print(f'slowest epoch {slowest_epoch + 1}, ' f'average time is {epoch_ave_time[slowest_epoch]:.4f}') print(f'fastest epoch {fastest_epoch + 1}, ' f'average time is {epoch_ave_time[fastest_epoch]:.4f}') print(f'time std over epochs is {std_over_epoch:.4f}') print(f'average iter time: {np.mean(all_times):.4f} s/iter') print() def plot_curve(log_dicts, args): if args.backend is not None: plt.switch_backend(args.backend) sns.set_style(args.style) # if legend is None, use {filename}_{key} as legend legend = args.legend if legend is None: legend = [] for json_log in args.json_logs: for metric in args.keys: legend.append(f'{json_log}_{metric}') assert len(legend) == (len(args.json_logs) * len(args.keys)) metrics = args.keys num_metrics = len(metrics) for i, log_dict in enumerate(log_dicts): epochs = list(log_dict.keys()) for j, metric in enumerate(metrics): print(f'plot curve of {args.json_logs[i]}, metric is {metric}') if metric not in log_dict[epochs[int(args.eval_interval) - 1]]: if 'mAP' in metric: raise KeyError( f'{args.json_logs[i]} does not contain metric ' f'{metric}. Please check if "--no-validate" is ' 'specified when you trained the model.') raise KeyError( f'{args.json_logs[i]} does not contain metric {metric}. 
' 'Please reduce the log interval in the config so that ' 'interval is less than iterations of one epoch.') if 'mAP' in metric: xs = [] ys = [] for epoch in epochs: ys += log_dict[epoch][metric] if 'val' in log_dict[epoch]['mode']: xs.append(epoch) plt.xlabel('epoch') plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o') else: xs = [] ys = [] num_iters_per_epoch = log_dict[epochs[0]]['iter'][-2] for epoch in epochs: iters = log_dict[epoch]['iter'] if log_dict[epoch]['mode'][-1] == 'val': iters = iters[:-1] xs.append( np.array(iters) + (epoch - 1) * num_iters_per_epoch) ys.append(np.array(log_dict[epoch][metric][:len(iters)])) xs = np.concatenate(xs) ys = np.concatenate(ys) plt.xlabel('iter') plt.plot( xs, ys, label=legend[i * num_metrics + j], linewidth=0.5) plt.legend() if args.title is not None: plt.title(args.title) if args.out is None: plt.show() else: print(f'save curve to: {args.out}') plt.savefig(args.out) plt.cla() def add_plot_parser(subparsers): parser_plt = subparsers.add_parser( 'plot_curve', help='parser for plotting curves') parser_plt.add_argument( 'json_logs', type=str, nargs='+', help='path of train log in json format') parser_plt.add_argument( '--keys', type=str, nargs='+', default=['bbox_mAP'], help='the metric that you want to plot') parser_plt.add_argument( '--start-epoch', type=str, default='1', help='the epoch that you want to start') parser_plt.add_argument( '--eval-interval', type=str, default='1', help='the eval interval when training') parser_plt.add_argument('--title', type=str, help='title of figure') parser_plt.add_argument( '--legend', type=str, nargs='+', default=None, help='legend of each plot') parser_plt.add_argument( '--backend', type=str, default=None, help='backend of plt') parser_plt.add_argument( '--style', type=str, default='dark', help='style of plt') parser_plt.add_argument('--out', type=str, default=None) def add_time_parser(subparsers): parser_time = subparsers.add_parser( 'cal_train_time', help='parser for computing the average time per training iteration') parser_time.add_argument( 'json_logs', type=str, nargs='+', help='path of train log in json format') parser_time.add_argument( '--include-outliers', action='store_true', help='include the first value of every epoch when computing ' 'the average time') def parse_args(): parser = argparse.ArgumentParser(description='Analyze Json Log') # currently only support plot curve and calculate average train time subparsers = parser.add_subparsers(dest='task', help='task parser') add_plot_parser(subparsers) add_time_parser(subparsers) args = parser.parse_args() return args def load_json_logs(json_logs): # load and convert json_logs to log_dict, key is epoch, value is a sub dict # keys of sub dict is different metrics, e.g. 
memory, bbox_mAP # value of sub dict is a list of corresponding values of all iterations log_dicts = [dict() for _ in json_logs] for json_log, log_dict in zip(json_logs, log_dicts): with open(json_log, 'r') as log_file: for i, line in enumerate(log_file): log = json.loads(line.strip()) # skip the first training info line if i == 0: continue # skip lines without `epoch` field if 'epoch' not in log: continue epoch = log.pop('epoch') if epoch not in log_dict: log_dict[epoch] = defaultdict(list) for k, v in log.items(): log_dict[epoch][k].append(v) return log_dicts def main(): args = parse_args() json_logs = args.json_logs for json_log in json_logs: assert json_log.endswith('.json') log_dicts = load_json_logs(json_logs) eval(args.task)(log_dicts, args) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/analysis_tools/analyze_results.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import argparse import os.path as osp from multiprocessing import Pool import mmcv import numpy as np from mmcv import Config, DictAction from mmdet.core.evaluation import eval_map from mmdet.core.visualization import imshow_gt_det_bboxes from mmdet.datasets import build_dataset, get_loading_pipeline from mmdet.datasets.api_wrappers import pq_compute_single_core from mmdet.utils import replace_cfg_vals, update_data_root def bbox_map_eval(det_result, annotation, nproc=4): """Evaluate mAP of single image det result. Args: det_result (list[list]): [[cls1_det, cls2_det, ...], ...]. The outer list indicates images, and the inner list indicates per-class detected bboxes. annotation (dict): Ground truth annotations where keys of annotations are: - bboxes: numpy array of shape (n, 4) - labels: numpy array of shape (n, ) - bboxes_ignore (optional): numpy array of shape (k, 4) - labels_ignore (optional): numpy array of shape (k, ) nproc (int): Processes used for computing mAP. Default: 4. Returns: float: mAP """ # use only bbox det result if isinstance(det_result, tuple): bbox_det_result = [det_result[0]] else: bbox_det_result = [det_result] # mAP iou_thrs = np.linspace( .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) processes = [] workers = Pool(processes=nproc) for thr in iou_thrs: p = workers.apply_async(eval_map, (bbox_det_result, [annotation]), { 'iou_thr': thr, 'logger': 'silent', 'nproc': 1 }) processes.append(p) workers.close() workers.join() mean_aps = [] for p in processes: mean_aps.append(p.get()[0]) return sum(mean_aps) / len(mean_aps) class ResultVisualizer: """Display and save evaluation results. Args: show (bool): Whether to show the image. Default: True. wait_time (float): Value of waitKey param. Default: 0. score_thr (float): Minimum score of bboxes to be shown. Default: 0. overlay_gt_pred (bool): Whether to plot gts and predictions on the same image. If False, predictions and gts will be plotted on two same image which will be concatenated in vertical direction. The image above is drawn with gt, and the image below is drawn with the prediction result. Default: False. """ def __init__(self, show=False, wait_time=0, score_thr=0, overlay_gt_pred=False): self.show = show self.wait_time = wait_time self.score_thr = score_thr self.overlay_gt_pred = overlay_gt_pred def _save_image_gts_results(self, dataset, results, performances, out_dir=None): """Display or save image with groung truths and predictions from a model. Args: dataset (Dataset): A PyTorch dataset. 
results (list): Object detection or panoptic segmentation results from test results pkl file. performances (dict): A dict containing samples' indices in the dataset and the model's performance on them. out_dir (str, optional): The directory where images will be saved. Default: None. """ mmcv.mkdir_or_exist(out_dir) for performance_info in performances: index, performance = performance_info data_info = dataset.prepare_train_img(index) # calc save file path filename = data_info['filename'] if data_info['img_prefix'] is not None: filename = osp.join(data_info['img_prefix'], filename) else: filename = data_info['filename'] fname, name = osp.splitext(osp.basename(filename)) save_filename = fname + '_' + str(round(performance, 3)) + name out_file = osp.join(out_dir, save_filename) imshow_gt_det_bboxes( data_info['img'], data_info, results[index], dataset.CLASSES, gt_bbox_color=dataset.PALETTE, gt_text_color=(200, 200, 200), gt_mask_color=dataset.PALETTE, det_bbox_color=dataset.PALETTE, det_text_color=(200, 200, 200), det_mask_color=dataset.PALETTE, show=self.show, score_thr=self.score_thr, wait_time=self.wait_time, out_file=out_file, overlay_gt_pred=self.overlay_gt_pred) def evaluate_and_show(self, dataset, results, topk=20, show_dir='work_dir'): """Evaluate and show results. Args: dataset (Dataset): A PyTorch dataset. results (list): Object detection or panoptic segmentation results from test results pkl file. topk (int): Number of the highest topk and lowest topk after evaluation index sorting. Default: 20. show_dir (str, optional): The directory where images will be saved. Default: 'work_dir'. """ assert topk > 0 if (topk * 2) > len(dataset): topk = len(dataset) // 2 if isinstance(results[0], dict): good_samples, bad_samples = self.panoptic_evaluate( dataset, results, topk=topk) elif isinstance(results[0], list): good_samples, bad_samples = self.detection_evaluate( dataset, results, topk=topk) elif isinstance(results[0], tuple): results_ = [result[0] for result in results] good_samples, bad_samples = self.detection_evaluate( dataset, results_, topk=topk) else: raise TypeError('The format of result is not supported yet. ' 'Currently dict for panoptic segmentation and list ' 'or tuple for object detection are supported.') good_dir = osp.abspath(osp.join(show_dir, 'good')) bad_dir = osp.abspath(osp.join(show_dir, 'bad')) self._save_image_gts_results(dataset, results, good_samples, good_dir) self._save_image_gts_results(dataset, results, bad_samples, bad_dir) def detection_evaluate(self, dataset, results, topk=20, eval_fn=None): """Evaluation for object detection. Args: dataset (Dataset): A PyTorch dataset. results (list): Object detection results from test results pkl file. topk (int): Number of the highest topk and lowest topk after evaluation index sorting. Default: 20. eval_fn (callable, optional): Eval function. Default: None. Returns: tuple: A tuple contains good samples and bad samples. good_mAPs (list[tuple[int, float]]): Good samples' indices in the dataset paired with the model's mAP on them. bad_mAPs (list[tuple[int, float]]): Bad samples' indices in the dataset paired with the model's mAP on them.
""" if eval_fn is None: eval_fn = bbox_map_eval else: assert callable(eval_fn) prog_bar = mmcv.ProgressBar(len(results)) _mAPs = {} for i, (result, ) in enumerate(zip(results)): # self.dataset[i] should not call directly # because there is a risk of mismatch data_info = dataset.prepare_train_img(i) mAP = eval_fn(result, data_info['ann_info']) _mAPs[i] = mAP prog_bar.update() # descending select topk image _mAPs = list(sorted(_mAPs.items(), key=lambda kv: kv[1])) good_mAPs = _mAPs[-topk:] bad_mAPs = _mAPs[:topk] return good_mAPs, bad_mAPs def panoptic_evaluate(self, dataset, results, topk=20): """Evaluation for panoptic segmentation. Args: dataset (Dataset): A PyTorch dataset. results (list): Panoptic segmentation results from test results pkl file. topk (int): Number of the highest topk and lowest topk after evaluation index sorting. Default: 20. Returns: tuple: A tuple contains good samples and bad samples. good_pqs (dict[int, float]): A dict contains good samples's indices in dataset and model's performance on them. bad_pqs (dict[int, float]): A dict contains bad samples's indices in dataset and model's performance on them. """ # image to annotations gt_json = dataset.coco.img_ann_map result_files, tmp_dir = dataset.format_results(results) pred_json = mmcv.load(result_files['panoptic'])['annotations'] pred_folder = osp.join(tmp_dir.name, 'panoptic') gt_folder = dataset.seg_prefix pqs = {} prog_bar = mmcv.ProgressBar(len(results)) for i in range(len(results)): data_info = dataset.prepare_train_img(i) image_id = data_info['img_info']['id'] gt_ann = { 'image_id': image_id, 'segments_info': gt_json[image_id], 'file_name': data_info['img_info']['segm_file'] } pred_ann = pred_json[i] pq_stat = pq_compute_single_core( i, [(gt_ann, pred_ann)], gt_folder, pred_folder, dataset.categories, dataset.file_client, print_log=False) pq_results, classwise_results = pq_stat.pq_average( dataset.categories, isthing=None) pqs[i] = pq_results['pq'] prog_bar.update() if tmp_dir is not None: tmp_dir.cleanup() # descending select topk image pqs = list(sorted(pqs.items(), key=lambda kv: kv[1])) good_pqs = pqs[-topk:] bad_pqs = pqs[:topk] return good_pqs, bad_pqs def parse_args(): parser = argparse.ArgumentParser( description='MMDet eval image prediction result for each') parser.add_argument('config', help='test config file path') parser.add_argument( 'prediction_path', help='prediction path where test pkl result') parser.add_argument( 'show_dir', help='directory where painted images will be saved') parser.add_argument('--show', action='store_true', help='show results') parser.add_argument( '--wait-time', type=float, default=0, help='the interval of show (s), 0 is block') parser.add_argument( '--topk', default=20, type=int, help='saved Number of the highest topk ' 'and lowest topk after index sorting') parser.add_argument( '--show-score-thr', type=float, default=0, help='score threshold (default: 0.)') parser.add_argument( '--overlay-gt-pred', action='store_true', help='whether to plot gts and predictions on the same image.' 'If False, predictions and gts will be plotted on two same' 'image which will be concatenated in vertical direction.' 'The image above is drawn with gt, and the image below is' 'drawn with the prediction result.') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file. 
If the value to ' 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' 'Note that the quotation marks are necessary and that no white space ' 'is allowed.') args = parser.parse_args() return args def main(): args = parse_args() mmcv.check_file_exist(args.prediction_path) cfg = Config.fromfile(args.config) # replace the ${key} with the value of cfg.key cfg = replace_cfg_vals(cfg) # update data root according to MMDET_DATASETS update_data_root(cfg) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) cfg.data.test.test_mode = True cfg.data.test.pop('samples_per_gpu', 0) if cfg.data.train.type in ('MultiImageMixDataset', 'ClassBalancedDataset', 'RepeatDataset', 'ConcatDataset'): cfg.data.test.pipeline = get_loading_pipeline( cfg.data.train.dataset.pipeline) else: cfg.data.test.pipeline = get_loading_pipeline(cfg.data.train.pipeline) dataset = build_dataset(cfg.data.test) outputs = mmcv.load(args.prediction_path) result_visualizer = ResultVisualizer(args.show, args.wait_time, args.show_score_thr, args.overlay_gt_pred) result_visualizer.evaluate_and_show( dataset, outputs, topk=args.topk, show_dir=args.show_dir) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/analysis_tools/benchmark.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import argparse import copy import os import time import torch from mmcv import Config, DictAction from mmcv.cnn import fuse_conv_bn from mmcv.parallel import MMDistributedDataParallel from mmcv.runner import init_dist, load_checkpoint, wrap_fp16_model from mmdet.datasets import (build_dataloader, build_dataset, replace_ImageToTensor) from mmdet.models import build_detector from mmdet.utils import replace_cfg_vals, update_data_root def parse_args(): parser = argparse.ArgumentParser(description='MMDet benchmark a model') parser.add_argument('config', help='test config file path') parser.add_argument('checkpoint', help='checkpoint file') parser.add_argument( '--repeat-num', type=int, default=1, help='number of repeat times of measurement for averaging the results') parser.add_argument( '--max-iter', type=int, default=2000, help='num of max iter') parser.add_argument( '--log-interval', type=int, default=50, help='interval of logging') parser.add_argument( '--fuse-conv-bn', action='store_true', help='Whether to fuse conv and bn, this will slightly increase' 'the inference speed') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file. If the value to ' 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' 'Note that the quotation marks are necessary and that no white space ' 'is allowed.') parser.add_argument( '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher') parser.add_argument('--local_rank', type=int, default=0) args = parser.parse_args() if 'LOCAL_RANK' not in os.environ: os.environ['LOCAL_RANK'] = str(args.local_rank) return args def measure_inference_speed(cfg, checkpoint, max_iter, log_interval, is_fuse_conv_bn): # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True cfg.model.pretrained = None cfg.data.test.test_mode = True # build the dataloader samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1) if samples_per_gpu > 1: # Replace 'ImageToTensor' to 'DefaultFormatBundle' cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) dataset = build_dataset(cfg.data.test) data_loader = build_dataloader( dataset, samples_per_gpu=1, # Because multiple processes will occupy additional CPU resources, # FPS statistics will be more unstable when workers_per_gpu is not 0. # It is reasonable to set workers_per_gpu to 0. workers_per_gpu=0, dist=True, shuffle=False) # build the model and load checkpoint cfg.model.train_cfg = None model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) fp16_cfg = cfg.get('fp16', None) if fp16_cfg is not None: wrap_fp16_model(model) load_checkpoint(model, checkpoint, map_location='cpu') if is_fuse_conv_bn: model = fuse_conv_bn(model) model = MMDistributedDataParallel( model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False) model.eval() # the first several iterations may be very slow so skip them num_warmup = 5 pure_inf_time = 0 fps = 0 # benchmark with 2000 image and take the average for i, data in enumerate(data_loader): torch.cuda.synchronize() start_time = time.perf_counter() with torch.no_grad(): model(return_loss=False, rescale=True, **data) torch.cuda.synchronize() elapsed = time.perf_counter() - start_time if i >= num_warmup: pure_inf_time += elapsed if (i + 1) % log_interval == 0: fps = (i + 1 - num_warmup) / pure_inf_time print( f'Done image [{i + 1:<3}/ {max_iter}], ' f'fps: {fps:.1f} img / s, ' f'times per image: {1000 / fps:.1f} ms / img', flush=True) if (i + 1) == max_iter: fps = (i + 1 - num_warmup) / pure_inf_time print( f'Overall fps: {fps:.1f} img / s, ' f'times per image: {1000 / fps:.1f} ms / img', flush=True) break return fps def repeat_measure_inference_speed(cfg, checkpoint, max_iter, log_interval, is_fuse_conv_bn, repeat_num=1): assert repeat_num >= 1 fps_list = [] for _ in range(repeat_num): # cp_cfg = copy.deepcopy(cfg) fps_list.append( measure_inference_speed(cp_cfg, checkpoint, max_iter, log_interval, is_fuse_conv_bn)) if repeat_num > 1: fps_list_ = [round(fps, 1) for fps in fps_list] times_pre_image_list_ = [round(1000 / fps, 1) for fps in fps_list] mean_fps_ = sum(fps_list_) / len(fps_list_) mean_times_pre_image_ = sum(times_pre_image_list_) / len( times_pre_image_list_) print( f'Overall fps: {fps_list_}[{mean_fps_:.1f}] img / s, ' f'times per image: ' f'{times_pre_image_list_}[{mean_times_pre_image_:.1f}] ms / img', flush=True) return fps_list return fps_list[0] def main(): args = parse_args() cfg = Config.fromfile(args.config) # replace the ${key} with the value of cfg.key cfg = replace_cfg_vals(cfg) # update data root according to MMDET_DATASETS update_data_root(cfg) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) if args.launcher == 'none': raise 
NotImplementedError('Only supports distributed mode') else: init_dist(args.launcher, **cfg.dist_params) repeat_measure_inference_speed(cfg, args.checkpoint, args.max_iter, args.log_interval, args.fuse_conv_bn, args.repeat_num) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/analysis_tools/coco_error_analysis.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import copy import os from argparse import ArgumentParser from multiprocessing import Pool import matplotlib.pyplot as plt import numpy as np from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval def makeplot(rs, ps, outDir, class_name, iou_type): cs = np.vstack([ np.ones((2, 3)), np.array([0.31, 0.51, 0.74]), np.array([0.75, 0.31, 0.30]), np.array([0.36, 0.90, 0.38]), np.array([0.50, 0.39, 0.64]), np.array([1, 0.6, 0]), ]) areaNames = ['allarea', 'small', 'medium', 'large'] types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN'] for i in range(len(areaNames)): area_ps = ps[..., i, 0] figure_title = iou_type + '-' + class_name + '-' + areaNames[i] aps = [ps_.mean() for ps_ in area_ps] ps_curve = [ ps_.mean(axis=1) if ps_.ndim > 1 else ps_ for ps_ in area_ps ] ps_curve.insert(0, np.zeros(ps_curve[0].shape)) fig = plt.figure() ax = plt.subplot(111) for k in range(len(types)): ax.plot(rs, ps_curve[k + 1], color=[0, 0, 0], linewidth=0.5) ax.fill_between( rs, ps_curve[k], ps_curve[k + 1], color=cs[k], label=str(f'[{aps[k]:.3f}]' + types[k]), ) plt.xlabel('recall') plt.ylabel('precision') plt.xlim(0, 1.0) plt.ylim(0, 1.0) plt.title(figure_title) plt.legend() # plt.show() fig.savefig(outDir + f'/{figure_title}.png') plt.close(fig) def autolabel(ax, rects): """Attach a text label above each bar in *rects*, displaying its height.""" for rect in rects: height = rect.get_height() if height > 0 and height <= 1: # for percent values text_label = '{:2.0f}'.format(height * 100) else: text_label = '{:2.0f}'.format(height) ax.annotate( text_label, xy=(rect.get_x() + rect.get_width() / 2, height), xytext=(0, 3), # 3 points vertical offset textcoords='offset points', ha='center', va='bottom', fontsize='x-small', ) def makebarplot(rs, ps, outDir, class_name, iou_type): areaNames = ['allarea', 'small', 'medium', 'large'] types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN'] fig, ax = plt.subplots() x = np.arange(len(areaNames)) # the areaNames locations width = 0.60 # the width of the bars rects_list = [] figure_title = iou_type + '-' + class_name + '-' + 'ap bar plot' for i in range(len(types) - 1): type_ps = ps[i, ..., 0] aps = [ps_.mean() for ps_ in type_ps.T] rects_list.append( ax.bar( x - width / 2 + (i + 1) * width / len(types), aps, width / len(types), label=types[i], )) # Add some text for labels, title and custom x-axis tick labels, etc. 
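# The grouped-bar layout above follows the standard matplotlib pattern: each
# area group sits at an integer tick x, the width=0.60 slot around the tick
# is split into len(types) == 7 sub-slots of width / len(types) ~= 0.086, and
# type i is drawn at x - width / 2 + (i + 1) * width / len(types)
# (~= x - 0.21 for i == 0). Since the loop runs to len(types) - 1, the last
# column, 'FN', whose precision is filled with a constant 1.0 by
# analyze_results, is not drawn as a bar.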
ax.set_ylabel('Mean Average Precision (mAP)') ax.set_title(figure_title) ax.set_xticks(x) ax.set_xticklabels(areaNames) ax.legend() # Add score texts over bars for rects in rects_list: autolabel(ax, rects) # Save plot fig.savefig(outDir + f'/{figure_title}.png') plt.close(fig) def get_gt_area_group_numbers(cocoEval): areaRng = cocoEval.params.areaRng areaRngStr = [str(aRng) for aRng in areaRng] areaRngLbl = cocoEval.params.areaRngLbl areaRngStr2areaRngLbl = dict(zip(areaRngStr, areaRngLbl)) areaRngLbl2Number = dict.fromkeys(areaRngLbl, 0) for evalImg in cocoEval.evalImgs: if evalImg: for gtIgnore in evalImg['gtIgnore']: if not gtIgnore: aRngLbl = areaRngStr2areaRngLbl[str(evalImg['aRng'])] areaRngLbl2Number[aRngLbl] += 1 return areaRngLbl2Number def make_gt_area_group_numbers_plot(cocoEval, outDir, verbose=True): areaRngLbl2Number = get_gt_area_group_numbers(cocoEval) areaRngLbl = areaRngLbl2Number.keys() if verbose: print('number of annotations per area group:', areaRngLbl2Number) # Init figure fig, ax = plt.subplots() x = np.arange(len(areaRngLbl)) # the areaNames locations width = 0.60 # the width of the bars figure_title = 'number of annotations per area group' rects = ax.bar(x, areaRngLbl2Number.values(), width) # Add some text for labels, title and custom x-axis tick labels, etc. ax.set_ylabel('Number of annotations') ax.set_title(figure_title) ax.set_xticks(x) ax.set_xticklabels(areaRngLbl) # Add score texts over bars autolabel(ax, rects) # Save plot fig.tight_layout() fig.savefig(outDir + f'/{figure_title}.png') plt.close(fig) def make_gt_area_histogram_plot(cocoEval, outDir): n_bins = 100 areas = [ann['area'] for ann in cocoEval.cocoGt.anns.values()] # init figure figure_title = 'gt annotation areas histogram plot' fig, ax = plt.subplots() # Set the number of bins ax.hist(np.sqrt(areas), bins=n_bins) # Add some text for labels, title and custom x-axis tick labels, etc. 
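# Histogramming sqrt(area) rather than the raw area puts the x-axis on a
# side-length scale in pixels, so the COCO-style cutoffs used by this tool's
# --areas default (1024 = 32**2 and 9216 = 96**2) fall at the readable
# positions 32 and 96.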
ax.set_xlabel('Squareroot Area') ax.set_ylabel('Number of annotations') ax.set_title(figure_title) # Save plot fig.tight_layout() fig.savefig(outDir + f'/{figure_title}.png') plt.close(fig) def analyze_individual_category(k, cocoDt, cocoGt, catId, iou_type, areas=None): nm = cocoGt.loadCats(catId)[0] print(f'--------------analyzing {k + 1}-{nm["name"]}---------------') ps_ = {} dt = copy.deepcopy(cocoDt) nm = cocoGt.loadCats(catId)[0] imgIds = cocoGt.getImgIds() dt_anns = dt.dataset['annotations'] select_dt_anns = [] for ann in dt_anns: if ann['category_id'] == catId: select_dt_anns.append(ann) dt.dataset['annotations'] = select_dt_anns dt.createIndex() # compute precision but ignore superclass confusion gt = copy.deepcopy(cocoGt) child_catIds = gt.getCatIds(supNms=[nm['supercategory']]) for idx, ann in enumerate(gt.dataset['annotations']): if ann['category_id'] in child_catIds and ann['category_id'] != catId: gt.dataset['annotations'][idx]['ignore'] = 1 gt.dataset['annotations'][idx]['iscrowd'] = 1 gt.dataset['annotations'][idx]['category_id'] = catId cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type) cocoEval.params.imgIds = imgIds cocoEval.params.maxDets = [100] cocoEval.params.iouThrs = [0.1] cocoEval.params.useCats = 1 if areas: cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]], [areas[0], areas[1]], [areas[1], areas[2]]] cocoEval.evaluate() cocoEval.accumulate() ps_supercategory = cocoEval.eval['precision'][0, :, k, :, :] ps_['ps_supercategory'] = ps_supercategory # compute precision but ignore any class confusion gt = copy.deepcopy(cocoGt) for idx, ann in enumerate(gt.dataset['annotations']): if ann['category_id'] != catId: gt.dataset['annotations'][idx]['ignore'] = 1 gt.dataset['annotations'][idx]['iscrowd'] = 1 gt.dataset['annotations'][idx]['category_id'] = catId cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type) cocoEval.params.imgIds = imgIds cocoEval.params.maxDets = [100] cocoEval.params.iouThrs = [0.1] cocoEval.params.useCats = 1 if areas: cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]], [areas[0], areas[1]], [areas[1], areas[2]]] cocoEval.evaluate() cocoEval.accumulate() ps_allcategory = cocoEval.eval['precision'][0, :, k, :, :] ps_['ps_allcategory'] = ps_allcategory return k, ps_ def analyze_results(res_file, ann_file, res_types, out_dir, extraplots=None, areas=None): for res_type in res_types: assert res_type in ['bbox', 'segm'] if areas: assert len(areas) == 3, '3 integers should be specified as areas, \ representing 3 area regions' directory = os.path.dirname(out_dir + '/') if not os.path.exists(directory): print(f'-------------create {out_dir}-----------------') os.makedirs(directory) cocoGt = COCO(ann_file) cocoDt = cocoGt.loadRes(res_file) imgIds = cocoGt.getImgIds() for res_type in res_types: res_out_dir = out_dir + '/' + res_type + '/' res_directory = os.path.dirname(res_out_dir) if not os.path.exists(res_directory): print(f'-------------create {res_out_dir}-----------------') os.makedirs(res_directory) iou_type = res_type cocoEval = COCOeval( copy.deepcopy(cocoGt), copy.deepcopy(cocoDt), iou_type) cocoEval.params.imgIds = imgIds cocoEval.params.iouThrs = [0.75, 0.5, 0.1] cocoEval.params.maxDets = [100] if areas: cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]], [areas[0], areas[1]], [areas[1], areas[2]]] cocoEval.evaluate() cocoEval.accumulate() ps = cocoEval.eval['precision'] ps = np.vstack([ps, np.zeros((4, *ps.shape[1:]))]) catIds = cocoGt.getCatIds() recThrs = cocoEval.params.recThrs with Pool(processes=48) as pool: 
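# The pool size is hard-coded to 48; each worker evaluates a single category
# via analyze_individual_category, so on machines with fewer cores (or
# datasets with fewer categories) something like
# processes=min(48, len(catIds)) would arguably be a safer choice.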
args = [(k, cocoDt, cocoGt, catId, iou_type, areas) for k, catId in enumerate(catIds)] analyze_results = pool.starmap(analyze_individual_category, args) for k, catId in enumerate(catIds): nm = cocoGt.loadCats(catId)[0] print(f'--------------saving {k + 1}-{nm["name"]}---------------') analyze_result = analyze_results[k] assert k == analyze_result[0] ps_supercategory = analyze_result[1]['ps_supercategory'] ps_allcategory = analyze_result[1]['ps_allcategory'] # compute precision but ignore superclass confusion ps[3, :, k, :, :] = ps_supercategory # compute precision but ignore any class confusion ps[4, :, k, :, :] = ps_allcategory # fill in background and false negative errors and plot ps[ps == -1] = 0 ps[5, :, k, :, :] = ps[4, :, k, :, :] > 0 ps[6, :, k, :, :] = 1.0 makeplot(recThrs, ps[:, :, k], res_out_dir, nm['name'], iou_type) if extraplots: makebarplot(recThrs, ps[:, :, k], res_out_dir, nm['name'], iou_type) makeplot(recThrs, ps, res_out_dir, 'allclass', iou_type) if extraplots: makebarplot(recThrs, ps, res_out_dir, 'allclass', iou_type) make_gt_area_group_numbers_plot( cocoEval=cocoEval, outDir=res_out_dir, verbose=True) make_gt_area_histogram_plot(cocoEval=cocoEval, outDir=res_out_dir) def main(): parser = ArgumentParser(description='COCO Error Analysis Tool') parser.add_argument('result', help='result file (json format) path') parser.add_argument('out_dir', help='dir to save analyze result images') parser.add_argument( '--ann', default='data/coco/annotations/instances_val2017.json', help='annotation file path') parser.add_argument( '--types', type=str, nargs='+', default=['bbox'], help='result types') parser.add_argument( '--extraplots', action='store_true', help='export extra bar/stat plots') parser.add_argument( '--areas', type=int, nargs='+', default=[1024, 9216, 10000000000], help='area regions') args = parser.parse_args() analyze_results( args.result, args.ann, args.types, out_dir=args.out_dir, extraplots=args.extraplots, areas=args.areas) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/analysis_tools/confusion_matrix.py ================================================ import argparse import os import matplotlib.pyplot as plt import mmcv import numpy as np from matplotlib.ticker import MultipleLocator from mmcv import Config, DictAction from mmcv.ops import nms from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps from mmdet.datasets import build_dataset from mmdet.utils import replace_cfg_vals, update_data_root def parse_args(): parser = argparse.ArgumentParser( description='Generate confusion matrix from detection results') parser.add_argument('config', help='test config file path') parser.add_argument( 'prediction_path', help='prediction path where test .pkl result') parser.add_argument( 'save_dir', help='directory where confusion matrix will be saved') parser.add_argument( '--show', action='store_true', help='show confusion matrix') parser.add_argument( '--color-theme', default='plasma', help='theme of the matrix color map') parser.add_argument( '--score-thr', type=float, default=0.3, help='score threshold to filter detection bboxes') parser.add_argument( '--tp-iou-thr', type=float, default=0.5, help='IoU threshold to be considered as matched') parser.add_argument( '--nms-iou-thr', type=float, default=None, help='nms IoU threshold, only applied when users want to change the' 'nms IoU threshold.') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='override some settings in 
the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file. If the value to ' 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' 'Note that the quotation marks are necessary and that no white space ' 'is allowed.') args = parser.parse_args() return args def calculate_confusion_matrix(dataset, results, score_thr=0, nms_iou_thr=None, tp_iou_thr=0.5): """Calculate the confusion matrix. Args: dataset (Dataset): Test or val dataset. results (list[ndarray]): A list of detection results in each image. score_thr (float|optional): Score threshold to filter bboxes. Default: 0. nms_iou_thr (float|optional): nms IoU threshold, the detection results have done nms in the detector, only applied when users want to change the nms IoU threshold. Default: None. tp_iou_thr (float|optional): IoU threshold to be considered as matched. Default: 0.5. """ num_classes = len(dataset.CLASSES) confusion_matrix = np.zeros(shape=[num_classes + 1, num_classes + 1]) assert len(dataset) == len(results) prog_bar = mmcv.ProgressBar(len(results)) for idx, per_img_res in enumerate(results): if isinstance(per_img_res, tuple): res_bboxes, _ = per_img_res else: res_bboxes = per_img_res ann = dataset.get_ann_info(idx) gt_bboxes = ann['bboxes'] labels = ann['labels'] analyze_per_img_dets(confusion_matrix, gt_bboxes, labels, res_bboxes, score_thr, tp_iou_thr, nms_iou_thr) prog_bar.update() return confusion_matrix def analyze_per_img_dets(confusion_matrix, gt_bboxes, gt_labels, result, score_thr=0, tp_iou_thr=0.5, nms_iou_thr=None): """Analyze detection results on each image. Args: confusion_matrix (ndarray): The confusion matrix, has shape (num_classes + 1, num_classes + 1). gt_bboxes (ndarray): Ground truth bboxes, has shape (num_gt, 4). gt_labels (ndarray): Ground truth labels, has shape (num_gt). result (ndarray): Detection results, has shape (num_classes, num_bboxes, 5). score_thr (float): Score threshold to filter bboxes. Default: 0. tp_iou_thr (float): IoU threshold to be considered as matched. Default: 0.5. nms_iou_thr (float|optional): nms IoU threshold, the detection results have done nms in the detector, only applied when users want to change the nms IoU threshold. Default: None. """ true_positives = np.zeros_like(gt_labels) for det_label, det_bboxes in enumerate(result): if nms_iou_thr: det_bboxes, _ = nms( det_bboxes[:, :4], det_bboxes[:, -1], nms_iou_thr, score_threshold=score_thr) ious = bbox_overlaps(det_bboxes[:, :4], gt_bboxes) for i, det_bbox in enumerate(det_bboxes): score = det_bbox[4] det_match = 0 if score >= score_thr: for j, gt_label in enumerate(gt_labels): if ious[i, j] >= tp_iou_thr: det_match += 1 if gt_label == det_label: true_positives[j] += 1 # TP confusion_matrix[gt_label, det_label] += 1 if det_match == 0: # BG FP confusion_matrix[-1, det_label] += 1 for num_tp, gt_label in zip(true_positives, gt_labels): if num_tp == 0: # FN confusion_matrix[gt_label, -1] += 1 def plot_confusion_matrix(confusion_matrix, labels, save_dir=None, show=True, title='Normalized Confusion Matrix', color_theme='plasma'): """Draw confusion matrix with matplotlib. Args: confusion_matrix (ndarray): The confusion matrix. labels (list[str]): List of class names. save_dir (str|optional): If set, save the confusion matrix plot to the given path. Default: None. show (bool): Whether to show the plot. Default: True. title (str): Title of the plot. Default: `Normalized Confusion Matrix`. 
color_theme (str): Theme of the matrix color map. Default: `plasma`. """ # normalize the confusion matrix per_label_sums = confusion_matrix.sum(axis=1)[:, np.newaxis] confusion_matrix = \ confusion_matrix.astype(np.float32) / per_label_sums * 100 num_classes = len(labels) fig, ax = plt.subplots( figsize=(0.5 * num_classes, 0.5 * num_classes * 0.8), dpi=180) cmap = plt.get_cmap(color_theme) im = ax.imshow(confusion_matrix, cmap=cmap) plt.colorbar(mappable=im, ax=ax) title_font = {'weight': 'bold', 'size': 12} ax.set_title(title, fontdict=title_font) label_font = {'size': 10} plt.ylabel('Ground Truth Label', fontdict=label_font) plt.xlabel('Prediction Label', fontdict=label_font) # draw locator xmajor_locator = MultipleLocator(1) xminor_locator = MultipleLocator(0.5) ax.xaxis.set_major_locator(xmajor_locator) ax.xaxis.set_minor_locator(xminor_locator) ymajor_locator = MultipleLocator(1) yminor_locator = MultipleLocator(0.5) ax.yaxis.set_major_locator(ymajor_locator) ax.yaxis.set_minor_locator(yminor_locator) # draw grid ax.grid(True, which='minor', linestyle='-') # draw label ax.set_xticks(np.arange(num_classes)) ax.set_yticks(np.arange(num_classes)) ax.set_xticklabels(labels) ax.set_yticklabels(labels) ax.tick_params( axis='x', bottom=False, top=True, labelbottom=False, labeltop=True) plt.setp( ax.get_xticklabels(), rotation=45, ha='left', rotation_mode='anchor') # draw confution matrix value for i in range(num_classes): for j in range(num_classes): ax.text( j, i, '{}%'.format( int(confusion_matrix[ i, j]) if not np.isnan(confusion_matrix[i, j]) else -1), ha='center', va='center', color='w', size=7) ax.set_ylim(len(confusion_matrix) - 0.5, -0.5) # matplotlib>3.1.1 fig.tight_layout() if save_dir is not None: plt.savefig( os.path.join(save_dir, 'confusion_matrix.png'), format='png') if show: plt.show() def main(): args = parse_args() cfg = Config.fromfile(args.config) # replace the ${key} with the value of cfg.key cfg = replace_cfg_vals(cfg) # update data root according to MMDET_DATASETS update_data_root(cfg) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) results = mmcv.load(args.prediction_path) assert isinstance(results, list) if isinstance(results[0], list): pass elif isinstance(results[0], tuple): results = [result[0] for result in results] else: raise TypeError('invalid type of prediction results') if isinstance(cfg.data.test, dict): cfg.data.test.test_mode = True elif isinstance(cfg.data.test, list): for ds_cfg in cfg.data.test: ds_cfg.test_mode = True dataset = build_dataset(cfg.data.test) confusion_matrix = calculate_confusion_matrix(dataset, results, args.score_thr, args.nms_iou_thr, args.tp_iou_thr) plot_confusion_matrix( confusion_matrix, dataset.CLASSES + ('background', ), save_dir=args.save_dir, show=args.show, color_theme=args.color_theme) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/analysis_tools/eval_metric.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
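# Typical usage (paths are illustrative):
#
#     python tools/analysis_tools/eval_metric.py \
#         configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \
#         results.pkl --eval bbox segm
#
# This evaluates a results pkl produced by tools/test.py; passing
# --format-only instead converts the results to the dataset's submission
# format without running any evaluation.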
import argparse import mmcv from mmcv import Config, DictAction from mmdet.datasets import build_dataset from mmdet.utils import replace_cfg_vals, update_data_root def parse_args(): parser = argparse.ArgumentParser(description='Evaluate metric of the ' 'results saved in pkl format') parser.add_argument('config', help='Config of the model') parser.add_argument('pkl_results', help='Results in pickle format') parser.add_argument( '--format-only', action='store_true', help='Format the output results without perform evaluation. It is' 'useful when you want to format the result to a specific format and ' 'submit it to the test server') parser.add_argument( '--eval', type=str, nargs='+', help='Evaluation metrics, which depends on the dataset, e.g., "bbox",' ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file. If the value to ' 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' 'Note that the quotation marks are necessary and that no white space ' 'is allowed.') parser.add_argument( '--eval-options', nargs='+', action=DictAction, help='custom options for evaluation, the key-value pair in xxx=yyy ' 'format will be kwargs for dataset.evaluate() function') args = parser.parse_args() return args def main(): args = parse_args() cfg = Config.fromfile(args.config) # replace the ${key} with the value of cfg.key cfg = replace_cfg_vals(cfg) # update data root according to MMDET_DATASETS update_data_root(cfg) assert args.eval or args.format_only, ( 'Please specify at least one operation (eval/format the results) with ' 'the argument "--eval", "--format-only"') if args.eval and args.format_only: raise ValueError('--eval and --format_only cannot be both specified') if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) cfg.data.test.test_mode = True dataset = build_dataset(cfg.data.test) outputs = mmcv.load(args.pkl_results) kwargs = {} if args.eval_options is None else args.eval_options if args.format_only: dataset.format_results(outputs, **kwargs) if args.eval: eval_kwargs = cfg.get('evaluation', {}).copy() # hard-code way to remove EvalHook args for key in [ 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule' ]: eval_kwargs.pop(key, None) eval_kwargs.update(dict(metric=args.eval, **kwargs)) print(dataset.evaluate(outputs, **eval_kwargs)) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/analysis_tools/get_flops.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import argparse import numpy as np import torch from mmcv import Config, DictAction from mmdet.models import build_detector try: from mmcv.cnn import get_model_complexity_info except ImportError: raise ImportError('Please upgrade mmcv to >0.6.2') def parse_args(): parser = argparse.ArgumentParser(description='Train a detector') parser.add_argument('config', help='train config file path') parser.add_argument( '--shape', type=int, nargs='+', default=[1280, 800], help='input image size') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file. 
If the value to ' 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' 'Note that the quotation marks are necessary and that no white space ' 'is allowed.') parser.add_argument( '--size-divisor', type=int, default=32, help='Pad the input image to the minimum size that is divisible ' 'by size_divisor; -1 means do not pad the image.') args = parser.parse_args() return args def main(): args = parse_args() if len(args.shape) == 1: h = w = args.shape[0] elif len(args.shape) == 2: h, w = args.shape else: raise ValueError('invalid input shape') ori_shape = (3, h, w) divisor = args.size_divisor if divisor > 0: h = int(np.ceil(h / divisor)) * divisor w = int(np.ceil(w / divisor)) * divisor input_shape = (3, h, w) cfg = Config.fromfile(args.config) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) model = build_detector( cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg')) if torch.cuda.is_available(): model.cuda() model.eval() if hasattr(model, 'forward_dummy'): model.forward = model.forward_dummy else: raise NotImplementedError( 'FLOPs counter is currently not supported with {}'. format(model.__class__.__name__)) flops, params = get_model_complexity_info(model, input_shape) split_line = '=' * 30 if divisor > 0 and \ input_shape != ori_shape: print(f'{split_line}\nUsing size divisor, the input shape changed ' f'from {ori_shape} to {input_shape}\n') print(f'{split_line}\nInput shape: {input_shape}\n' f'Flops: {flops}\nParams: {params}\n{split_line}') print('!!!Please be cautious if you use the results in papers. ' 'You may need to check if all ops are supported and verify that the ' 'flops computation is correct.') if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/analysis_tools/optimize_anchors.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. """Optimize anchor settings on a specific dataset. This script provides two methods to optimize YOLO anchors: k-means anchor clustering and differential evolution. You can use ``--algorithm k-means`` and ``--algorithm differential_evolution`` to switch between the two methods. Example: Use k-means anchor clustering:: python tools/analysis_tools/optimize_anchors.py ${CONFIG} \ --algorithm k-means --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \ --output-dir ${OUTPUT_DIR} Use differential evolution to optimize anchors:: python tools/analysis_tools/optimize_anchors.py ${CONFIG} \ --algorithm differential_evolution \ --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \ --output-dir ${OUTPUT_DIR} """ import argparse import os.path as osp import mmcv import numpy as np import torch from mmcv import Config from scipy.optimize import differential_evolution from mmdet.core import bbox_cxcywh_to_xyxy, bbox_overlaps, bbox_xyxy_to_cxcywh from mmdet.datasets import build_dataset from mmdet.utils import get_root_logger, replace_cfg_vals, update_data_root def parse_args(): parser = argparse.ArgumentParser(description='Optimize anchor parameters.') parser.add_argument('config', help='Train config file path.') parser.add_argument( '--device', default='cuda:0', help='Device used for calculating.') parser.add_argument( '--input-shape', type=int, nargs='+', default=[608, 608], help='input image size') parser.add_argument( '--algorithm', default='differential_evolution', help='Algorithm used for anchor optimizing.'
'Support k-means and differential_evolution for YOLO.') parser.add_argument( '--iters', default=1000, type=int, help='Maximum iterations for optimizer.') parser.add_argument( '--output-dir', default=None, type=str, help='Path to save anchor optimize result.') args = parser.parse_args() return args class BaseAnchorOptimizer: """Base class for anchor optimizer. Args: dataset (obj:`Dataset`): Dataset object. input_shape (list[int]): Input image shape of the model. Format in [width, height]. logger (obj:`logging.Logger`): The logger for logging. device (str, optional): Device used for calculating. Default: 'cuda:0' out_dir (str, optional): Path to save anchor optimize result. Default: None """ def __init__(self, dataset, input_shape, logger, device='cuda:0', out_dir=None): self.dataset = dataset self.input_shape = input_shape self.logger = logger self.device = device self.out_dir = out_dir bbox_whs, img_shapes = self.get_whs_and_shapes() ratios = img_shapes.max(1, keepdims=True) / np.array([input_shape]) # resize to input shape self.bbox_whs = bbox_whs / ratios def get_whs_and_shapes(self): """Get widths and heights of bboxes and shapes of images. Returns: tuple[np.ndarray]: Array of bbox shapes and array of image shapes with shape (num_bboxes, 2) in [width, height] format. """ self.logger.info('Collecting bboxes from annotation...') bbox_whs = [] img_shapes = [] prog_bar = mmcv.ProgressBar(len(self.dataset)) for idx in range(len(self.dataset)): ann = self.dataset.get_ann_info(idx) data_info = self.dataset.data_infos[idx] img_shape = np.array([data_info['width'], data_info['height']]) gt_bboxes = ann['bboxes'] for bbox in gt_bboxes: wh = bbox[2:4] - bbox[0:2] img_shapes.append(img_shape) bbox_whs.append(wh) prog_bar.update() print('\n') bbox_whs = np.array(bbox_whs) img_shapes = np.array(img_shapes) self.logger.info(f'Collected {bbox_whs.shape[0]} bboxes.') return bbox_whs, img_shapes def get_zero_center_bbox_tensor(self): """Get a tensor of bboxes centered at (0, 0). Returns: Tensor: Tensor of bboxes with shape (num_bboxes, 4) in [xmin, ymin, xmax, ymax] format. """ whs = torch.from_numpy(self.bbox_whs).to( self.device, dtype=torch.float32) bboxes = bbox_cxcywh_to_xyxy( torch.cat([torch.zeros_like(whs), whs], dim=1)) return bboxes def optimize(self): raise NotImplementedError def save_result(self, anchors, path=None): anchor_results = [] for w, h in anchors: anchor_results.append([round(w), round(h)]) self.logger.info(f'Anchor optimize result:{anchor_results}') if path: json_path = osp.join(path, 'anchor_optimize_result.json') mmcv.dump(anchor_results, json_path) self.logger.info(f'Result saved in {json_path}') class YOLOKMeansAnchorOptimizer(BaseAnchorOptimizer): r"""YOLO anchor optimizer using k-means. Code refer to `AlexeyAB/darknet. `_. Args: num_anchors (int) : Number of anchors. iters (int): Maximum iterations for k-means. 
""" def __init__(self, num_anchors, iters, **kwargs): super(YOLOKMeansAnchorOptimizer, self).__init__(**kwargs) self.num_anchors = num_anchors self.iters = iters def optimize(self): anchors = self.kmeans_anchors() self.save_result(anchors, self.out_dir) def kmeans_anchors(self): self.logger.info( f'Start cluster {self.num_anchors} YOLO anchors with K-means...') bboxes = self.get_zero_center_bbox_tensor() cluster_center_idx = torch.randint( 0, bboxes.shape[0], (self.num_anchors, )).to(self.device) assignments = torch.zeros((bboxes.shape[0], )).to(self.device) cluster_centers = bboxes[cluster_center_idx] if self.num_anchors == 1: cluster_centers = self.kmeans_maximization(bboxes, assignments, cluster_centers) anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy() anchors = sorted(anchors, key=lambda x: x[0] * x[1]) return anchors prog_bar = mmcv.ProgressBar(self.iters) for i in range(self.iters): converged, assignments = self.kmeans_expectation( bboxes, assignments, cluster_centers) if converged: self.logger.info(f'K-means process has converged at iter {i}.') break cluster_centers = self.kmeans_maximization(bboxes, assignments, cluster_centers) prog_bar.update() print('\n') avg_iou = bbox_overlaps(bboxes, cluster_centers).max(1)[0].mean().item() anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy() anchors = sorted(anchors, key=lambda x: x[0] * x[1]) self.logger.info(f'Anchor cluster finish. Average IOU: {avg_iou}') return anchors def kmeans_maximization(self, bboxes, assignments, centers): """Maximization part of EM algorithm(Expectation-Maximization)""" new_centers = torch.zeros_like(centers) for i in range(centers.shape[0]): mask = (assignments == i) if mask.sum(): new_centers[i, :] = bboxes[mask].mean(0) return new_centers def kmeans_expectation(self, bboxes, assignments, centers): """Expectation part of EM algorithm(Expectation-Maximization)""" ious = bbox_overlaps(bboxes, centers) closest = ious.argmax(1) converged = (closest == assignments).all() return converged, closest class YOLODEAnchorOptimizer(BaseAnchorOptimizer): """YOLO anchor optimizer using differential evolution algorithm. Args: num_anchors (int) : Number of anchors. iters (int): Maximum iterations for k-means. strategy (str): The differential evolution strategy to use. Should be one of: - 'best1bin' - 'best1exp' - 'rand1exp' - 'randtobest1exp' - 'currenttobest1exp' - 'best2exp' - 'rand2exp' - 'randtobest1bin' - 'currenttobest1bin' - 'best2bin' - 'rand2bin' - 'rand1bin' Default: 'best1bin'. population_size (int): Total population size of evolution algorithm. Default: 15. convergence_thr (float): Tolerance for convergence, the optimizing stops when ``np.std(pop) <= abs(convergence_thr) + convergence_thr * np.abs(np.mean(population_energies))``, respectively. Default: 0.0001. mutation (tuple[float]): Range of dithering randomly changes the mutation constant. Default: (0.5, 1). recombination (float): Recombination constant of crossover probability. Default: 0.7. 
""" def __init__(self, num_anchors, iters, strategy='best1bin', population_size=15, convergence_thr=0.0001, mutation=(0.5, 1), recombination=0.7, **kwargs): super(YOLODEAnchorOptimizer, self).__init__(**kwargs) self.num_anchors = num_anchors self.iters = iters self.strategy = strategy self.population_size = population_size self.convergence_thr = convergence_thr self.mutation = mutation self.recombination = recombination def optimize(self): anchors = self.differential_evolution() self.save_result(anchors, self.out_dir) def differential_evolution(self): bboxes = self.get_zero_center_bbox_tensor() bounds = [] for i in range(self.num_anchors): bounds.extend([(0, self.input_shape[0]), (0, self.input_shape[1])]) result = differential_evolution( func=self.avg_iou_cost, bounds=bounds, args=(bboxes, ), strategy=self.strategy, maxiter=self.iters, popsize=self.population_size, tol=self.convergence_thr, mutation=self.mutation, recombination=self.recombination, updating='immediate', disp=True) self.logger.info( f'Anchor evolution finish. Average IOU: {1 - result.fun}') anchors = [(w, h) for w, h in zip(result.x[::2], result.x[1::2])] anchors = sorted(anchors, key=lambda x: x[0] * x[1]) return anchors @staticmethod def avg_iou_cost(anchor_params, bboxes): assert len(anchor_params) % 2 == 0 anchor_whs = torch.tensor( [[w, h] for w, h in zip(anchor_params[::2], anchor_params[1::2])]).to( bboxes.device, dtype=bboxes.dtype) anchor_boxes = bbox_cxcywh_to_xyxy( torch.cat([torch.zeros_like(anchor_whs), anchor_whs], dim=1)) ious = bbox_overlaps(bboxes, anchor_boxes) max_ious, _ = ious.max(1) cost = 1 - max_ious.mean().item() return cost def main(): logger = get_root_logger() args = parse_args() cfg = args.config cfg = Config.fromfile(cfg) # replace the ${key} with the value of cfg.key cfg = replace_cfg_vals(cfg) # update data root according to MMDET_DATASETS update_data_root(cfg) input_shape = args.input_shape assert len(input_shape) == 2 anchor_type = cfg.model.bbox_head.anchor_generator.type assert anchor_type == 'YOLOAnchorGenerator', \ f'Only support optimize YOLOAnchor, but get {anchor_type}.' base_sizes = cfg.model.bbox_head.anchor_generator.base_sizes num_anchors = sum([len(sizes) for sizes in base_sizes]) train_data_cfg = cfg.data.train while 'dataset' in train_data_cfg: train_data_cfg = train_data_cfg['dataset'] dataset = build_dataset(train_data_cfg) if args.algorithm == 'k-means': optimizer = YOLOKMeansAnchorOptimizer( dataset=dataset, input_shape=input_shape, device=args.device, num_anchors=num_anchors, iters=args.iters, logger=logger, out_dir=args.output_dir) elif args.algorithm == 'differential_evolution': optimizer = YOLODEAnchorOptimizer( dataset=dataset, input_shape=input_shape, device=args.device, num_anchors=num_anchors, iters=args.iters, logger=logger, out_dir=args.output_dir) else: raise NotImplementedError( f'Only support k-means and differential_evolution, ' f'but get {args.algorithm}') optimizer.optimize() if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/analysis_tools/robustness_eval.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
import os.path as osp from argparse import ArgumentParser import mmcv import numpy as np def print_coco_results(results): def _print(result, ap=1, iouThr=None, areaRng='all', maxDets=100): titleStr = 'Average Precision' if ap == 1 else 'Average Recall' typeStr = '(AP)' if ap == 1 else '(AR)' iouStr = '0.50:0.95' \ if iouThr is None else f'{iouThr:0.2f}' iStr = f' {titleStr:<18} {typeStr} @[ IoU={iouStr:<9} | ' iStr += f'area={areaRng:>6s} | maxDets={maxDets:>3d} ] = {result:0.3f}' print(iStr) stats = np.zeros((12, )) stats[0] = _print(results[0], 1) stats[1] = _print(results[1], 1, iouThr=.5) stats[2] = _print(results[2], 1, iouThr=.75) stats[3] = _print(results[3], 1, areaRng='small') stats[4] = _print(results[4], 1, areaRng='medium') stats[5] = _print(results[5], 1, areaRng='large') stats[6] = _print(results[6], 0, maxDets=1) stats[7] = _print(results[7], 0, maxDets=10) stats[8] = _print(results[8], 0) stats[9] = _print(results[9], 0, areaRng='small') stats[10] = _print(results[10], 0, areaRng='medium') stats[11] = _print(results[11], 0, areaRng='large') def get_coco_style_results(filename, task='bbox', metric=None, prints='mPC', aggregate='benchmark'): assert aggregate in ['benchmark', 'all'] if prints == 'all': prints = ['P', 'mPC', 'rPC'] elif isinstance(prints, str): prints = [prints] for p in prints: assert p in ['P', 'mPC', 'rPC'] if metric is None: metrics = [ 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100', 'ARs', 'ARm', 'ARl' ] elif isinstance(metric, list): metrics = metric else: metrics = [metric] for metric_name in metrics: assert metric_name in [ 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100', 'ARs', 'ARm', 'ARl' ] eval_output = mmcv.load(filename) num_distortions = len(list(eval_output.keys())) results = np.zeros((num_distortions, 6, len(metrics)), dtype='float32') for corr_i, distortion in enumerate(eval_output): for severity in eval_output[distortion]: for metric_j, metric_name in enumerate(metrics): mAP = eval_output[distortion][severity][task][metric_name] results[corr_i, severity, metric_j] = mAP P = results[0, 0, :] if aggregate == 'benchmark': mPC = np.mean(results[:15, 1:, :], axis=(0, 1)) else: mPC = np.mean(results[:, 1:, :], axis=(0, 1)) rPC = mPC / P print(f'\nmodel: {osp.basename(filename)}') if metric is None: if 'P' in prints: print(f'Performance on Clean Data [P] ({task})') print_coco_results(P) if 'mPC' in prints: print(f'Mean Performance under Corruption [mPC] ({task})') print_coco_results(mPC) if 'rPC' in prints: print(f'Relative Performance under Corruption [rPC] ({task})') print_coco_results(rPC) else: if 'P' in prints: print(f'Performance on Clean Data [P] ({task})') for metric_i, metric_name in enumerate(metrics): print(f'{metric_name:5} = {P[metric_i]:0.3f}') if 'mPC' in prints: print(f'Mean Performance under Corruption [mPC] ({task})') for metric_i, metric_name in enumerate(metrics): print(f'{metric_name:5} = {mPC[metric_i]:0.3f}') if 'rPC' in prints: print(f'Relative Performance under Corruption [rPC] ({task})') for metric_i, metric_name in enumerate(metrics): print(f'{metric_name:5} => {rPC[metric_i] * 100:0.1f} %') return results def get_voc_style_results(filename, prints='mPC', aggregate='benchmark'): assert aggregate in ['benchmark', 'all'] if prints == 'all': prints = ['P', 'mPC', 'rPC'] elif isinstance(prints, str): prints = [prints] for p in prints: assert p in ['P', 'mPC', 'rPC'] eval_output = mmcv.load(filename) num_distortions = len(list(eval_output.keys())) results = np.zeros((num_distortions, 6, 20), 
dtype='float32') for i, distortion in enumerate(eval_output): for severity in eval_output[distortion]: mAP = [ eval_output[distortion][severity][j]['ap'] for j in range(len(eval_output[distortion][severity])) ] results[i, severity, :] = mAP P = results[0, 0, :] if aggregate == 'benchmark': mPC = np.mean(results[:15, 1:, :], axis=(0, 1)) else: mPC = np.mean(results[:, 1:, :], axis=(0, 1)) rPC = mPC / P print(f'\nmodel: {osp.basename(filename)}') if 'P' in prints: print(f'Performance on Clean Data [P] in AP50 = {np.mean(P):0.3f}') if 'mPC' in prints: print('Mean Performance under Corruption [mPC] in AP50 = ' f'{np.mean(mPC):0.3f}') if 'rPC' in prints: print('Relative Performance under Corruption [rPC] in % = ' f'{np.mean(rPC) * 100:0.1f}') return np.mean(results, axis=2, keepdims=True) def get_results(filename, dataset='coco', task='bbox', metric=None, prints='mPC', aggregate='benchmark'): assert dataset in ['coco', 'voc', 'cityscapes'] if dataset in ['coco', 'cityscapes']: results = get_coco_style_results( filename, task=task, metric=metric, prints=prints, aggregate=aggregate) elif dataset == 'voc': if task != 'bbox': print('Only bbox analysis is supported for Pascal VOC') print('Will report bbox results\n') if metric not in [None, ['AP'], ['AP50']]: print('Only the AP50 metric is supported for Pascal VOC') print('Will report AP50 metric\n') results = get_voc_style_results( filename, prints=prints, aggregate=aggregate) return results def get_distortions_from_file(filename): eval_output = mmcv.load(filename) return get_distortions_from_results(eval_output) def get_distortions_from_results(eval_output): distortions = [] for i, distortion in enumerate(eval_output): distortions.append(distortion.replace('_', ' ')) return distortions def main(): parser = ArgumentParser(description='Corruption Result Analysis') parser.add_argument('filename', help='result file path') parser.add_argument( '--dataset', type=str, choices=['coco', 'voc', 'cityscapes'], default='coco', help='dataset type') parser.add_argument( '--task', type=str, nargs='+', choices=['bbox', 'segm'], default=['bbox'], help='task to report') parser.add_argument( '--metric', nargs='+', choices=[ None, 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100', 'ARs', 'ARm', 'ARl' ], default=None, help='metric to report') parser.add_argument( '--prints', type=str, nargs='+', choices=['P', 'mPC', 'rPC'], default='mPC', help='corruption benchmark metric to print') parser.add_argument( '--aggregate', type=str, choices=['all', 'benchmark'], default='benchmark', help='aggregate all results or only those \ for benchmark corruptions') args = parser.parse_args() for task in args.task: get_results( args.filename, dataset=args.dataset, task=task, metric=args.metric, prints=args.prints, aggregate=args.aggregate) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/analysis_tools/test_robustness.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
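# Typical usage (paths are illustrative):
#
#     python tools/analysis_tools/test_robustness.py \
#         configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
#         checkpoint.pth --out robustness.pkl --eval bbox
#
# The aggregated results written via --out are what
# tools/analysis_tools/robustness_eval.py above consumes.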
import argparse import copy import os import os.path as osp import mmcv import torch from mmcv import DictAction from mmcv.parallel import MMDataParallel, MMDistributedDataParallel from mmcv.runner import (get_dist_info, init_dist, load_checkpoint, wrap_fp16_model) from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from mmdet import datasets from mmdet.apis import multi_gpu_test, set_random_seed, single_gpu_test from mmdet.core import eval_map from mmdet.datasets import build_dataloader, build_dataset from mmdet.models import build_detector from tools.analysis_tools.robustness_eval import get_results def coco_eval_with_return(result_files, result_types, coco, max_dets=(100, 300, 1000)): for res_type in result_types: assert res_type in ['proposal', 'bbox', 'segm', 'keypoints'] if mmcv.is_str(coco): coco = COCO(coco) assert isinstance(coco, COCO) eval_results = {} for res_type in result_types: result_file = result_files[res_type] assert result_file.endswith('.json') coco_dets = coco.loadRes(result_file) img_ids = coco.getImgIds() iou_type = 'bbox' if res_type == 'proposal' else res_type cocoEval = COCOeval(coco, coco_dets, iou_type) cocoEval.params.imgIds = img_ids if res_type == 'proposal': cocoEval.params.useCats = 0 cocoEval.params.maxDets = list(max_dets) cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() if res_type == 'segm' or res_type == 'bbox': metric_names = [ 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100', 'ARs', 'ARm', 'ARl' ] eval_results[res_type] = { metric_names[i]: cocoEval.stats[i] for i in range(len(metric_names)) } else: eval_results[res_type] = cocoEval.stats return eval_results def voc_eval_with_return(result_file, dataset, iou_thr=0.5, logger='print', only_ap=True): det_results = mmcv.load(result_file) annotations = [dataset.get_ann_info(i) for i in range(len(dataset))] if hasattr(dataset, 'year') and dataset.year == 2007: dataset_name = 'voc07' else: dataset_name = dataset.CLASSES mean_ap, eval_results = eval_map( det_results, annotations, scale_ranges=None, iou_thr=iou_thr, dataset=dataset_name, logger=logger) if only_ap: eval_results = [{ 'ap': eval_results[i]['ap'] } for i in range(len(eval_results))] return mean_ap, eval_results def parse_args(): parser = argparse.ArgumentParser(description='MMDet test detector') parser.add_argument('config', help='test config file path') parser.add_argument('checkpoint', help='checkpoint file') parser.add_argument('--out', help='output result file') parser.add_argument( '--corruptions', type=str, nargs='+', default='benchmark', choices=[ 'all', 'benchmark', 'noise', 'blur', 'weather', 'digital', 'holdout', 'None', 'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog', 'brightness', 'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter', 'saturate' ], help='corruptions') parser.add_argument( '--severities', type=int, nargs='+', default=[0, 1, 2, 3, 4, 5], help='corruption severity levels') parser.add_argument( '--eval', type=str, nargs='+', choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'], help='eval types') parser.add_argument( '--iou-thr', type=float, default=0.5, help='IoU threshold for pascal voc evaluation') parser.add_argument( '--summaries', type=bool, default=False, help='Print summaries for every corruption and severity') parser.add_argument( '--workers', type=int, default=32, help='workers per gpu') 
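# A --workers value of 0 is treated as 'use the config value': main() below
# falls back to cfg.data.workers_per_gpu in that case.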
parser.add_argument('--show', action='store_true', help='show results') parser.add_argument( '--show-dir', help='directory where painted images will be saved') parser.add_argument( '--show-score-thr', type=float, default=0.3, help='score threshold (default: 0.3)') parser.add_argument('--tmpdir', help='tmp dir for writing some results') parser.add_argument('--seed', type=int, default=None, help='random seed') parser.add_argument( '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher') parser.add_argument('--local_rank', type=int, default=0) parser.add_argument( '--final-prints', type=str, nargs='+', choices=['P', 'mPC', 'rPC'], default='mPC', help='corruption benchmark metric to print at the end') parser.add_argument( '--final-prints-aggregate', type=str, choices=['all', 'benchmark'], default='benchmark', help='aggregate all results or only those for benchmark corruptions') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file. If the value to ' 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' 'Note that the quotation marks are necessary and that no white space ' 'is allowed.') args = parser.parse_args() if 'LOCAL_RANK' not in os.environ: os.environ['LOCAL_RANK'] = str(args.local_rank) return args def main(): args = parse_args() assert args.out or args.show or args.show_dir, \ ('Please specify at least one operation (save or show the results) ' 'with the argument "--out", "--show" or "show-dir"') if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): raise ValueError('The output file must be a pkl file.') cfg = mmcv.Config.fromfile(args.config) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True cfg.model.pretrained = None cfg.data.test.test_mode = True if args.workers == 0: args.workers = cfg.data.workers_per_gpu # init distributed env first, since logger depends on the dist info. 
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed)

    if 'all' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter',
            'saturate'
        ]
    elif 'benchmark' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression'
        ]
    elif 'noise' in args.corruptions:
        corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise']
    elif 'blur' in args.corruptions:
        corruptions = [
            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur'
        ]
    elif 'weather' in args.corruptions:
        corruptions = ['snow', 'frost', 'fog', 'brightness']
    elif 'digital' in args.corruptions:
        corruptions = [
            'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression'
        ]
    elif 'holdout' in args.corruptions:
        corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']
    elif 'None' in args.corruptions:
        corruptions = ['None']
        args.severities = [0]
    else:
        corruptions = args.corruptions

    rank, _ = get_dist_info()
    aggregated_results = {}
    for corr_i, corruption in enumerate(corruptions):
        aggregated_results[corruption] = {}
        for sev_i, corruption_severity in enumerate(args.severities):
            # evaluate severity 0 (= no corruption) only once
            if corr_i > 0 and corruption_severity == 0:
                aggregated_results[corruption][0] = \
                    aggregated_results[corruptions[0]][0]
                continue

            test_data_cfg = copy.deepcopy(cfg.data.test)
            # assign corruption and severity
            if corruption_severity > 0:
                corruption_trans = dict(
                    type='Corrupt',
                    corruption=corruption,
                    severity=corruption_severity)
                # TODO: hard coded "1", we assume that the first step is
                # loading images, which needs to be fixed in the future
                test_data_cfg['pipeline'].insert(1, corruption_trans)

            # print info
            print(f'\nTesting {corruption} at severity {corruption_severity}')

            # build the dataloader
            # TODO: support multiple images per gpu
            #       (only minor changes are needed)
            dataset = build_dataset(test_data_cfg)
            data_loader = build_dataloader(
                dataset,
                samples_per_gpu=1,
                workers_per_gpu=args.workers,
                dist=distributed,
                shuffle=False)

            # build the model and load checkpoint
            cfg.model.train_cfg = None
            model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
            fp16_cfg = cfg.get('fp16', None)
            if fp16_cfg is not None:
                wrap_fp16_model(model)
            checkpoint = load_checkpoint(
                model, args.checkpoint, map_location='cpu')
            # old versions did not save class info in checkpoints,
            # this workaround is for backward compatibility
            if 'CLASSES' in checkpoint.get('meta', {}):
                model.CLASSES = checkpoint['meta']['CLASSES']
            else:
                model.CLASSES = dataset.CLASSES

            if not distributed:
                model = MMDataParallel(model, device_ids=[0])
                show_dir = args.show_dir
                if show_dir is not None:
                    show_dir = osp.join(show_dir, corruption)
                    show_dir = osp.join(show_dir, str(corruption_severity))
                    if not osp.exists(show_dir):
                        os.makedirs(show_dir)
                outputs = single_gpu_test(model, data_loader, args.show,
                                          show_dir, args.show_score_thr)
            else:
                model = MMDistributedDataParallel(
                    model.cuda(),
                    device_ids=[torch.cuda.current_device()],
                    broadcast_buffers=False)
                outputs = multi_gpu_test(model, data_loader, args.tmpdir)

            if args.out and rank == 0:
                eval_results_filename = (
                    osp.splitext(args.out)[0] + '_results' +
                    osp.splitext(args.out)[1])
                mmcv.dump(outputs, args.out)
                eval_types = args.eval
                if cfg.dataset_type == 'VOCDataset':
                    if eval_types:
                        for eval_type in eval_types:
                            if eval_type == 'bbox':
                                test_dataset = mmcv.runner.obj_from_dict(
                                    cfg.data.test, datasets)
                                logger = 'print' if args.summaries else None
                                mean_ap, eval_results = \
                                    voc_eval_with_return(
                                        args.out, test_dataset,
                                        args.iou_thr, logger)
                                aggregated_results[corruption][
                                    corruption_severity] = eval_results
                            else:
                                print('\nOnly "bbox" evaluation is '
                                      'supported for pascal voc')
                else:
                    if eval_types:
                        print(f'Starting evaluate {" and ".join(eval_types)}')
                        if eval_types == ['proposal_fast']:
                            result_file = args.out
                        else:
                            if not isinstance(outputs[0], dict):
                                result_files = dataset.results2json(
                                    outputs, args.out)
                            else:
                                for name in outputs[0]:
                                    print(f'\nEvaluating {name}')
                                    outputs_ = [out[name] for out in outputs]
                                    result_file = args.out + f'.{name}'
                                    result_files = dataset.results2json(
                                        outputs_, result_file)
                        eval_results = coco_eval_with_return(
                            result_files, eval_types, dataset.coco)
                        aggregated_results[corruption][
                            corruption_severity] = eval_results
                    else:
                        print('\nNo task was selected for evaluation;'
                              '\nUse --eval to select a task')

                # save results after each evaluation
                mmcv.dump(aggregated_results, eval_results_filename)

    if rank == 0:
        # print final results
        print('\nAggregated results:')
        prints = args.final_prints
        aggregate = args.final_prints_aggregate

        if cfg.dataset_type == 'VOCDataset':
            get_results(
                eval_results_filename,
                dataset='voc',
                prints=prints,
                aggregate=aggregate)
        else:
            get_results(
                eval_results_filename,
                dataset='coco',
                prints=prints,
                aggregate=aggregate)


if __name__ == '__main__':
    main()


================================================
FILE: DLTA_AI_app/mmdetection/tools/dataset_converters/cityscapes.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
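# A minimal invocation sketch, assuming the standard Cityscapes layout with
# leftImg8bit/ and gtFine/ under the data root; the paths are placeholders:
#
#   python tools/dataset_converters/cityscapes.py data/cityscapes \
#       --nproc 8 -o data/cityscapes/annotations
#
# This writes COCO-style instancesonly_filtered_gtFine_{train,val,test}.json
# files from the gtFine instance-id maps, as implemented below.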
import argparse import glob import os.path as osp import cityscapesscripts.helpers.labels as CSLabels import mmcv import numpy as np import pycocotools.mask as maskUtils def collect_files(img_dir, gt_dir): suffix = 'leftImg8bit.png' files = [] for img_file in glob.glob(osp.join(img_dir, '**/*.png')): assert img_file.endswith(suffix), img_file inst_file = gt_dir + img_file[ len(img_dir):-len(suffix)] + 'gtFine_instanceIds.png' # Note that labelIds are not converted to trainId for seg map segm_file = gt_dir + img_file[ len(img_dir):-len(suffix)] + 'gtFine_labelIds.png' files.append((img_file, inst_file, segm_file)) assert len(files), f'No images found in {img_dir}' print(f'Loaded {len(files)} images from {img_dir}') return files def collect_annotations(files, nproc=1): print('Loading annotation images') if nproc > 1: images = mmcv.track_parallel_progress( load_img_info, files, nproc=nproc) else: images = mmcv.track_progress(load_img_info, files) return images def load_img_info(files): img_file, inst_file, segm_file = files inst_img = mmcv.imread(inst_file, 'unchanged') # ids < 24 are stuff labels (filtering them first is about 5% faster) unique_inst_ids = np.unique(inst_img[inst_img >= 24]) anno_info = [] for inst_id in unique_inst_ids: # For non-crowd annotations, inst_id // 1000 is the label_id # Crowd annotations have <1000 instance ids label_id = inst_id // 1000 if inst_id >= 1000 else inst_id label = CSLabels.id2label[label_id] if not label.hasInstances or label.ignoreInEval: continue category_id = label.id iscrowd = int(inst_id < 1000) mask = np.asarray(inst_img == inst_id, dtype=np.uint8, order='F') mask_rle = maskUtils.encode(mask[:, :, None])[0] area = maskUtils.area(mask_rle) # convert to COCO style XYWH format bbox = maskUtils.toBbox(mask_rle) # for json encoding mask_rle['counts'] = mask_rle['counts'].decode() anno = dict( iscrowd=iscrowd, category_id=category_id, bbox=bbox.tolist(), area=area.tolist(), segmentation=mask_rle) anno_info.append(anno) video_name = osp.basename(osp.dirname(img_file)) img_info = dict( # remove img_prefix for filename file_name=osp.join(video_name, osp.basename(img_file)), height=inst_img.shape[0], width=inst_img.shape[1], anno_info=anno_info, segm_file=osp.join(video_name, osp.basename(segm_file))) return img_info def cvt_annotations(image_infos, out_json_name): out_json = dict() img_id = 0 ann_id = 0 out_json['images'] = [] out_json['categories'] = [] out_json['annotations'] = [] for image_info in image_infos: image_info['id'] = img_id anno_infos = image_info.pop('anno_info') out_json['images'].append(image_info) for anno_info in anno_infos: anno_info['image_id'] = img_id anno_info['id'] = ann_id out_json['annotations'].append(anno_info) ann_id += 1 img_id += 1 for label in CSLabels.labels: if label.hasInstances and not label.ignoreInEval: cat = dict(id=label.id, name=label.name) out_json['categories'].append(cat) if len(out_json['annotations']) == 0: out_json.pop('annotations') mmcv.dump(out_json, out_json_name) return out_json def parse_args(): parser = argparse.ArgumentParser( description='Convert Cityscapes annotations to COCO format') parser.add_argument('cityscapes_path', help='cityscapes data path') parser.add_argument('--img-dir', default='leftImg8bit', type=str) parser.add_argument('--gt-dir', default='gtFine', type=str) parser.add_argument('-o', '--out-dir', help='output path') parser.add_argument( '--nproc', default=1, type=int, help='number of process') args = parser.parse_args() return args def main(): args = parse_args() cityscapes_path 
= args.cityscapes_path out_dir = args.out_dir if args.out_dir else cityscapes_path mmcv.mkdir_or_exist(out_dir) img_dir = osp.join(cityscapes_path, args.img_dir) gt_dir = osp.join(cityscapes_path, args.gt_dir) set_name = dict( train='instancesonly_filtered_gtFine_train.json', val='instancesonly_filtered_gtFine_val.json', test='instancesonly_filtered_gtFine_test.json') for split, json_name in set_name.items(): print(f'Converting {split} into {json_name}') with mmcv.Timer( print_tmpl='It took {}s to convert Cityscapes annotation'): files = collect_files( osp.join(img_dir, split), osp.join(gt_dir, split)) image_infos = collect_annotations(files, nproc=args.nproc) cvt_annotations(image_infos, osp.join(out_dir, json_name)) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/dataset_converters/images2coco.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import argparse import os import mmcv from PIL import Image def parse_args(): parser = argparse.ArgumentParser( description='Convert images to coco format without annotations') parser.add_argument('img_path', help='The root path of images') parser.add_argument( 'classes', type=str, help='The text file name of storage class list') parser.add_argument( 'out', type=str, help='The output annotation json file name, The save dir is in the ' 'same directory as img_path') parser.add_argument( '-e', '--exclude-extensions', type=str, nargs='+', help='The suffix of images to be excluded, such as "png" and "bmp"') args = parser.parse_args() return args def collect_image_infos(path, exclude_extensions=None): img_infos = [] images_generator = mmcv.scandir(path, recursive=True) for image_path in mmcv.track_iter_progress(list(images_generator)): if exclude_extensions is None or ( exclude_extensions is not None and not image_path.lower().endswith(exclude_extensions)): image_path = os.path.join(path, image_path) img_pillow = Image.open(image_path) img_info = { 'filename': image_path, 'width': img_pillow.width, 'height': img_pillow.height, } img_infos.append(img_info) return img_infos def cvt_to_coco_json(img_infos, classes): image_id = 0 coco = dict() coco['images'] = [] coco['type'] = 'instance' coco['categories'] = [] coco['annotations'] = [] image_set = set() for category_id, name in enumerate(classes): category_item = dict() category_item['supercategory'] = str('none') category_item['id'] = int(category_id) category_item['name'] = str(name) coco['categories'].append(category_item) for img_dict in img_infos: file_name = img_dict['filename'] assert file_name not in image_set image_item = dict() image_item['id'] = int(image_id) image_item['file_name'] = str(file_name) image_item['height'] = int(img_dict['height']) image_item['width'] = int(img_dict['width']) coco['images'].append(image_item) image_set.add(file_name) image_id += 1 return coco def main(): args = parse_args() assert args.out.endswith( 'json'), 'The output file name must be json suffix' # 1 load image list info img_infos = collect_image_infos(args.img_path, args.exclude_extensions) # 2 convert to coco format data classes = mmcv.list_from_file(args.classes) coco_info = cvt_to_coco_json(img_infos, classes) # 3 dump save_dir = os.path.join(args.img_path, '..', 'annotations') mmcv.mkdir_or_exist(save_dir) save_path = os.path.join(save_dir, args.out) mmcv.dump(coco_info, save_path) print(f'save json file: {save_path}') if __name__ == '__main__': main() 
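# A minimal invocation sketch (paths are placeholders): given a folder of
# unlabeled images and a text file listing one class name per line,
#
#   python tools/dataset_converters/images2coco.py data/my_images \
#       classes.txt unlabeled.json
#
# produces an annotations/unlabeled.json next to the image folder with COCO
# `images` and `categories` sections and an empty `annotations` list, as
# built by cvt_to_coco_json() above.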
================================================ FILE: DLTA_AI_app/mmdetection/tools/dataset_converters/pascal_voc.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import argparse import os.path as osp import xml.etree.ElementTree as ET import mmcv import numpy as np from mmdet.core import voc_classes label_ids = {name: i for i, name in enumerate(voc_classes())} def parse_xml(args): xml_path, img_path = args tree = ET.parse(xml_path) root = tree.getroot() size = root.find('size') w = int(size.find('width').text) h = int(size.find('height').text) bboxes = [] labels = [] bboxes_ignore = [] labels_ignore = [] for obj in root.findall('object'): name = obj.find('name').text label = label_ids[name] difficult = int(obj.find('difficult').text) bnd_box = obj.find('bndbox') bbox = [ int(bnd_box.find('xmin').text), int(bnd_box.find('ymin').text), int(bnd_box.find('xmax').text), int(bnd_box.find('ymax').text) ] if difficult: bboxes_ignore.append(bbox) labels_ignore.append(label) else: bboxes.append(bbox) labels.append(label) if not bboxes: bboxes = np.zeros((0, 4)) labels = np.zeros((0, )) else: bboxes = np.array(bboxes, ndmin=2) - 1 labels = np.array(labels) if not bboxes_ignore: bboxes_ignore = np.zeros((0, 4)) labels_ignore = np.zeros((0, )) else: bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1 labels_ignore = np.array(labels_ignore) annotation = { 'filename': img_path, 'width': w, 'height': h, 'ann': { 'bboxes': bboxes.astype(np.float32), 'labels': labels.astype(np.int64), 'bboxes_ignore': bboxes_ignore.astype(np.float32), 'labels_ignore': labels_ignore.astype(np.int64) } } return annotation def cvt_annotations(devkit_path, years, split, out_file): if not isinstance(years, list): years = [years] annotations = [] for year in years: filelist = osp.join(devkit_path, f'VOC{year}/ImageSets/Main/{split}.txt') if not osp.isfile(filelist): print(f'filelist does not exist: {filelist}, ' f'skip voc{year} {split}') return img_names = mmcv.list_from_file(filelist) xml_paths = [ osp.join(devkit_path, f'VOC{year}/Annotations/{img_name}.xml') for img_name in img_names ] img_paths = [ f'VOC{year}/JPEGImages/{img_name}.jpg' for img_name in img_names ] part_annotations = mmcv.track_progress(parse_xml, list(zip(xml_paths, img_paths))) annotations.extend(part_annotations) if out_file.endswith('json'): annotations = cvt_to_coco_json(annotations) mmcv.dump(annotations, out_file) return annotations def cvt_to_coco_json(annotations): image_id = 0 annotation_id = 0 coco = dict() coco['images'] = [] coco['type'] = 'instance' coco['categories'] = [] coco['annotations'] = [] image_set = set() def addAnnItem(annotation_id, image_id, category_id, bbox, difficult_flag): annotation_item = dict() annotation_item['segmentation'] = [] seg = [] # bbox[] is x1,y1,x2,y2 # left_top seg.append(int(bbox[0])) seg.append(int(bbox[1])) # left_bottom seg.append(int(bbox[0])) seg.append(int(bbox[3])) # right_bottom seg.append(int(bbox[2])) seg.append(int(bbox[3])) # right_top seg.append(int(bbox[2])) seg.append(int(bbox[1])) annotation_item['segmentation'].append(seg) xywh = np.array( [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]) annotation_item['area'] = int(xywh[2] * xywh[3]) if difficult_flag == 1: annotation_item['ignore'] = 0 annotation_item['iscrowd'] = 1 else: annotation_item['ignore'] = 0 annotation_item['iscrowd'] = 0 annotation_item['image_id'] = int(image_id) annotation_item['bbox'] = xywh.astype(int).tolist() annotation_item['category_id'] = int(category_id) 
annotation_item['id'] = int(annotation_id) coco['annotations'].append(annotation_item) return annotation_id + 1 for category_id, name in enumerate(voc_classes()): category_item = dict() category_item['supercategory'] = str('none') category_item['id'] = int(category_id) category_item['name'] = str(name) coco['categories'].append(category_item) for ann_dict in annotations: file_name = ann_dict['filename'] ann = ann_dict['ann'] assert file_name not in image_set image_item = dict() image_item['id'] = int(image_id) image_item['file_name'] = str(file_name) image_item['height'] = int(ann_dict['height']) image_item['width'] = int(ann_dict['width']) coco['images'].append(image_item) image_set.add(file_name) bboxes = ann['bboxes'][:, :4] labels = ann['labels'] for bbox_id in range(len(bboxes)): bbox = bboxes[bbox_id] label = labels[bbox_id] annotation_id = addAnnItem( annotation_id, image_id, label, bbox, difficult_flag=0) bboxes_ignore = ann['bboxes_ignore'][:, :4] labels_ignore = ann['labels_ignore'] for bbox_id in range(len(bboxes_ignore)): bbox = bboxes_ignore[bbox_id] label = labels_ignore[bbox_id] annotation_id = addAnnItem( annotation_id, image_id, label, bbox, difficult_flag=1) image_id += 1 return coco def parse_args(): parser = argparse.ArgumentParser( description='Convert PASCAL VOC annotations to mmdetection format') parser.add_argument('devkit_path', help='pascal voc devkit path') parser.add_argument('-o', '--out-dir', help='output path') parser.add_argument( '--out-format', default='pkl', choices=('pkl', 'coco'), help='output format, "coco" indicates coco annotation format') args = parser.parse_args() return args def main(): args = parse_args() devkit_path = args.devkit_path out_dir = args.out_dir if args.out_dir else devkit_path mmcv.mkdir_or_exist(out_dir) years = [] if osp.isdir(osp.join(devkit_path, 'VOC2007')): years.append('2007') if osp.isdir(osp.join(devkit_path, 'VOC2012')): years.append('2012') if '2007' in years and '2012' in years: years.append(['2007', '2012']) if not years: raise IOError(f'The devkit path {devkit_path} contains neither ' '"VOC2007" nor "VOC2012" subfolder') out_fmt = f'.{args.out_format}' if args.out_format == 'coco': out_fmt = '.json' for year in years: if year == '2007': prefix = 'voc07' elif year == '2012': prefix = 'voc12' elif year == ['2007', '2012']: prefix = 'voc0712' for split in ['train', 'val', 'trainval']: dataset_name = prefix + '_' + split print(f'processing {dataset_name} ...') cvt_annotations(devkit_path, year, split, osp.join(out_dir, dataset_name + out_fmt)) if not isinstance(year, list): dataset_name = prefix + '_test' print(f'processing {dataset_name} ...') cvt_annotations(devkit_path, year, 'test', osp.join(out_dir, dataset_name + out_fmt)) print('Done!') if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/deployment/mmdet2torchserve.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. from argparse import ArgumentParser, Namespace from pathlib import Path from tempfile import TemporaryDirectory import mmcv try: from model_archiver.model_packaging import package_model from model_archiver.model_packaging_utils import ModelExportUtils except ImportError: package_model = None def mmdet2torchserve( config_file: str, checkpoint_file: str, output_folder: str, model_name: str, model_version: str = '1.0', force: bool = False, ): """Converts MMDetection model (config + checkpoint) to TorchServe `.mar`. 
Args: config_file: In MMDetection config format. The contents vary for each task repository. checkpoint_file: In MMDetection checkpoint format. The contents vary for each task repository. output_folder: Folder where `{model_name}.mar` will be created. The file created will be in TorchServe archive format. model_name: If not None, used for naming the `{model_name}.mar` file that will be created under `output_folder`. If None, `{Path(checkpoint_file).stem}` will be used. model_version: Model's version. force: If True, if there is an existing `{model_name}.mar` file under `output_folder` it will be overwritten. """ mmcv.mkdir_or_exist(output_folder) config = mmcv.Config.fromfile(config_file) with TemporaryDirectory() as tmpdir: config.dump(f'{tmpdir}/config.py') args = Namespace( **{ 'model_file': f'{tmpdir}/config.py', 'serialized_file': checkpoint_file, 'handler': f'{Path(__file__).parent}/mmdet_handler.py', 'model_name': model_name or Path(checkpoint_file).stem, 'version': model_version, 'export_path': output_folder, 'force': force, 'requirements_file': None, 'extra_files': None, 'runtime': 'python', 'archive_format': 'default' }) manifest = ModelExportUtils.generate_manifest_json(args) package_model(args, manifest) def parse_args(): parser = ArgumentParser( description='Convert MMDetection models to TorchServe `.mar` format.') parser.add_argument('config', type=str, help='config file path') parser.add_argument('checkpoint', type=str, help='checkpoint file path') parser.add_argument( '--output-folder', type=str, required=True, help='Folder where `{model_name}.mar` will be created.') parser.add_argument( '--model-name', type=str, default=None, help='If not None, used for naming the `{model_name}.mar`' 'file that will be created under `output_folder`.' 'If None, `{Path(checkpoint_file).stem}` will be used.') parser.add_argument( '--model-version', type=str, default='1.0', help='Number used for versioning.') parser.add_argument( '-f', '--force', action='store_true', help='overwrite the existing `{model_name}.mar`') args = parser.parse_args() return args if __name__ == '__main__': args = parse_args() if package_model is None: raise ImportError('`torch-model-archiver` is required.' 'Try: pip install torch-model-archiver') mmdet2torchserve(args.config, args.checkpoint, args.output_folder, args.model_name, args.model_version, args.force) ================================================ FILE: DLTA_AI_app/mmdetection/tools/deployment/mmdet_handler.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import base64 import os import mmcv import torch from ts.torch_handler.base_handler import BaseHandler from mmdet.apis import inference_detector, init_detector class MMdetHandler(BaseHandler): threshold = 0.5 def initialize(self, context): properties = context.system_properties self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu' self.device = torch.device(self.map_location + ':' + str(properties.get('gpu_id')) if torch.cuda. 
is_available() else self.map_location) self.manifest = context.manifest model_dir = properties.get('model_dir') serialized_file = self.manifest['model']['serializedFile'] checkpoint = os.path.join(model_dir, serialized_file) self.config_file = os.path.join(model_dir, 'config.py') self.model = init_detector(self.config_file, checkpoint, self.device) self.initialized = True def preprocess(self, data): images = [] for row in data: image = row.get('data') or row.get('body') if isinstance(image, str): image = base64.b64decode(image) image = mmcv.imfrombytes(image) images.append(image) return images def inference(self, data, *args, **kwargs): results = inference_detector(self.model, data) return results def postprocess(self, data): # Format output following the example ObjectDetectionHandler format output = [] for image_index, image_result in enumerate(data): output.append([]) if isinstance(image_result, tuple): bbox_result, segm_result = image_result if isinstance(segm_result, tuple): segm_result = segm_result[0] # ms rcnn else: bbox_result, segm_result = image_result, None for class_index, class_result in enumerate(bbox_result): class_name = self.model.CLASSES[class_index] for bbox in class_result: bbox_coords = bbox[:-1].tolist() score = float(bbox[-1]) if score >= self.threshold: output[image_index].append({ 'class_name': class_name, 'bbox': bbox_coords, 'score': score }) return output ================================================ FILE: DLTA_AI_app/mmdetection/tools/deployment/onnx2tensorrt.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import argparse import os import os.path as osp import warnings import numpy as np import onnx import torch from mmcv import Config from mmcv.tensorrt import is_tensorrt_plugin_loaded, onnx2trt, save_trt_engine from mmdet.core.export import preprocess_example_input from mmdet.core.export.model_wrappers import (ONNXRuntimeDetector, TensorRTDetector) from mmdet.datasets import DATASETS def get_GiB(x: int): """return x GiB.""" return x * (1 << 30) def onnx2tensorrt(onnx_file, trt_file, input_config, verify=False, show=False, workspace_size=1, verbose=False): import tensorrt as trt onnx_model = onnx.load(onnx_file) max_shape = input_config['max_shape'] min_shape = input_config['min_shape'] opt_shape = input_config['opt_shape'] fp16_mode = False # create trt engine and wrapper opt_shape_dict = {'input': [min_shape, opt_shape, max_shape]} max_workspace_size = get_GiB(workspace_size) trt_engine = onnx2trt( onnx_model, opt_shape_dict, log_level=trt.Logger.VERBOSE if verbose else trt.Logger.ERROR, fp16_mode=fp16_mode, max_workspace_size=max_workspace_size) save_dir, _ = osp.split(trt_file) if save_dir: os.makedirs(save_dir, exist_ok=True) save_trt_engine(trt_engine, trt_file) print(f'Successfully created TensorRT engine: {trt_file}') if verify: # prepare input one_img, one_meta = preprocess_example_input(input_config) img_list, img_meta_list = [one_img], [[one_meta]] img_list = [_.cuda().contiguous() for _ in img_list] # wrap ONNX and TensorRT model onnx_model = ONNXRuntimeDetector(onnx_file, CLASSES, device_id=0) trt_model = TensorRTDetector(trt_file, CLASSES, device_id=0) # inference with wrapped model with torch.no_grad(): onnx_results = onnx_model( img_list, img_metas=img_meta_list, return_loss=False)[0] trt_results = trt_model( img_list, img_metas=img_meta_list, return_loss=False)[0] if show: out_file_ort, out_file_trt = None, None else: out_file_ort, out_file_trt = 'show-ort.png', 'show-trt.png' show_img = 
one_meta['show_img'] score_thr = 0.3 onnx_model.show_result( show_img, onnx_results, score_thr=score_thr, show=True, win_name='ONNXRuntime', out_file=out_file_ort) trt_model.show_result( show_img, trt_results, score_thr=score_thr, show=True, win_name='TensorRT', out_file=out_file_trt) with_mask = trt_model.with_masks # compare a part of result if with_mask: compare_pairs = list(zip(onnx_results, trt_results)) else: compare_pairs = [(onnx_results, trt_results)] err_msg = 'The numerical values are different between Pytorch' + \ ' and ONNX, but it does not necessarily mean the' + \ ' exported ONNX model is problematic.' # check the numerical value for onnx_res, pytorch_res in compare_pairs: for o_res, p_res in zip(onnx_res, pytorch_res): np.testing.assert_allclose( o_res, p_res, rtol=1e-03, atol=1e-05, err_msg=err_msg) print('The numerical values are the same between Pytorch and ONNX') def parse_normalize_cfg(test_pipeline): transforms = None for pipeline in test_pipeline: if 'transforms' in pipeline: transforms = pipeline['transforms'] break assert transforms is not None, 'Failed to find `transforms`' norm_config_li = [_ for _ in transforms if _['type'] == 'Normalize'] assert len(norm_config_li) == 1, '`norm_config` should only have one' norm_config = norm_config_li[0] return norm_config def parse_args(): parser = argparse.ArgumentParser( description='Convert MMDetection models from ONNX to TensorRT') parser.add_argument('config', help='test config file path') parser.add_argument('model', help='Filename of input ONNX model') parser.add_argument( '--trt-file', type=str, default='tmp.trt', help='Filename of output TensorRT engine') parser.add_argument( '--input-img', type=str, default='', help='Image for test') parser.add_argument( '--show', action='store_true', help='Whether to show output results') parser.add_argument( '--dataset', type=str, default='coco', help='Dataset name. This argument is deprecated and will be \ removed in future releases.') parser.add_argument( '--verify', action='store_true', help='Verify the outputs of ONNXRuntime and TensorRT') parser.add_argument( '--verbose', action='store_true', help='Whether to verbose logging messages while creating \ TensorRT engine. Defaults to False.') parser.add_argument( '--to-rgb', action='store_false', help='Feed model with RGB or BGR image. Default is RGB. This \ argument is deprecated and will be removed in future releases.') parser.add_argument( '--shape', type=int, nargs='+', default=[400, 600], help='Input size of the model') parser.add_argument( '--mean', type=float, nargs='+', default=[123.675, 116.28, 103.53], help='Mean value used for preprocess input data. This argument \ is deprecated and will be removed in future releases.') parser.add_argument( '--std', type=float, nargs='+', default=[58.395, 57.12, 57.375], help='Variance value used for preprocess input data. \ This argument is deprecated and will be removed in future releases.') parser.add_argument( '--min-shape', type=int, nargs='+', default=None, help='Minimum input size of the model in TensorRT') parser.add_argument( '--max-shape', type=int, nargs='+', default=None, help='Maximum input size of the model in TensorRT') parser.add_argument( '--workspace-size', type=int, default=1, help='Max workspace size in GiB') args = parser.parse_args() return args if __name__ == '__main__': assert is_tensorrt_plugin_loaded(), 'TensorRT plugin should be compiled.' 
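    # Note on the shape handling below: min_shape / opt_shape / max_shape
    # together define the TensorRT optimization profile. opt_shape is set to
    # the parsed input shape, and when --min-shape / --max-shape are omitted
    # both bounds collapse to that same static shape, so the resulting
    # engine only accepts inputs of exactly that size.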
    args = parse_args()
    warnings.warn(
        'Arguments like `--to-rgb`, `--mean`, `--std`, `--dataset` would be '
        'parsed directly from config file and are deprecated and will be '
        'removed in future releases.')
    if not args.input_img:
        args.input_img = osp.join(osp.dirname(__file__), '../../demo/demo.jpg')

    cfg = Config.fromfile(args.config)

    def parse_shape(shape):
        if len(shape) == 1:
            shape = (1, 3, shape[0], shape[0])
        elif len(shape) == 2:
            shape = (1, 3) + tuple(shape)
        else:
            raise ValueError('invalid input shape')
        return shape

    if args.shape:
        input_shape = parse_shape(args.shape)
    else:
        img_scale = cfg.test_pipeline[1]['img_scale']
        input_shape = (1, 3, img_scale[1], img_scale[0])

    if not args.max_shape:
        max_shape = input_shape
    else:
        max_shape = parse_shape(args.max_shape)

    if not args.min_shape:
        min_shape = input_shape
    else:
        min_shape = parse_shape(args.min_shape)

    dataset = DATASETS.get(cfg.data.test['type'])
    assert dataset is not None
    CLASSES = dataset.CLASSES
    normalize_cfg = parse_normalize_cfg(cfg.test_pipeline)
    input_config = {
        'min_shape': min_shape,
        'opt_shape': input_shape,
        'max_shape': max_shape,
        'input_shape': input_shape,
        'input_path': args.input_img,
        'normalize_cfg': normalize_cfg
    }
    # Create TensorRT engine
    onnx2tensorrt(
        args.model,
        args.trt_file,
        input_config,
        verify=args.verify,
        show=args.show,
        workspace_size=args.workspace_size,
        verbose=args.verbose)

    # Following strings of text style are from colorama package
    bright_style, reset_style = '\x1b[1m', '\x1b[0m'
    red_text, blue_text = '\x1b[31m', '\x1b[34m'
    white_background = '\x1b[107m'

    msg = white_background + bright_style + red_text
    msg += 'DeprecationWarning: This tool will be deprecated in future. '
    msg += blue_text + 'Welcome to use the unified model deployment toolbox '
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)


================================================
FILE: DLTA_AI_app/mmdetection/tools/deployment/pytorch2onnx.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
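# A minimal invocation sketch (config/checkpoint names are placeholders):
#
#   python tools/deployment/pytorch2onnx.py \
#       configs/retinanet/retinanet_r50_fpn_1x_coco.py \
#       checkpoints/retinanet_r50_fpn_1x_coco.pth \
#       --output-file retinanet.onnx --verify
#
# With --verify, the same image is run through both the PyTorch model and
# the exported ONNX model (via ONNXRuntimeDetector) and the outputs are
# compared with np.testing.assert_allclose, as coded below.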
import argparse import os.path as osp import warnings from functools import partial import numpy as np import onnx import torch from mmcv import Config, DictAction from mmdet.core.export import build_model_from_cfg, preprocess_example_input from mmdet.core.export.model_wrappers import ONNXRuntimeDetector def pytorch2onnx(model, input_img, input_shape, normalize_cfg, opset_version=11, show=False, output_file='tmp.onnx', verify=False, test_img=None, do_simplify=False, dynamic_export=None, skip_postprocess=False): input_config = { 'input_shape': input_shape, 'input_path': input_img, 'normalize_cfg': normalize_cfg } # prepare input one_img, one_meta = preprocess_example_input(input_config) img_list, img_meta_list = [one_img], [[one_meta]] if skip_postprocess: warnings.warn('Not all models support export onnx without post ' 'process, especially two stage detectors!') model.forward = model.forward_dummy torch.onnx.export( model, one_img, output_file, input_names=['input'], export_params=True, keep_initializers_as_inputs=True, do_constant_folding=True, verbose=show, opset_version=opset_version) print(f'Successfully exported ONNX model without ' f'post process: {output_file}') return # replace original forward function origin_forward = model.forward model.forward = partial( model.forward, img_metas=img_meta_list, return_loss=False, rescale=False) output_names = ['dets', 'labels'] if model.with_mask: output_names.append('masks') input_name = 'input' dynamic_axes = None if dynamic_export: dynamic_axes = { input_name: { 0: 'batch', 2: 'height', 3: 'width' }, 'dets': { 0: 'batch', 1: 'num_dets', }, 'labels': { 0: 'batch', 1: 'num_dets', }, } if model.with_mask: dynamic_axes['masks'] = {0: 'batch', 1: 'num_dets'} torch.onnx.export( model, img_list, output_file, input_names=[input_name], output_names=output_names, export_params=True, keep_initializers_as_inputs=True, do_constant_folding=True, verbose=show, opset_version=opset_version, dynamic_axes=dynamic_axes) model.forward = origin_forward if do_simplify: import onnxsim from mmdet import digit_version min_required_version = '0.4.0' assert digit_version(onnxsim.__version__) >= digit_version( min_required_version ), f'Requires to install onnxsim>={min_required_version}' model_opt, check_ok = onnxsim.simplify(output_file) if check_ok: onnx.save(model_opt, output_file) print(f'Successfully simplified ONNX model: {output_file}') else: warnings.warn('Failed to simplify ONNX model.') print(f'Successfully exported ONNX model: {output_file}') if verify: # check by onnx onnx_model = onnx.load(output_file) onnx.checker.check_model(onnx_model) # wrap onnx model onnx_model = ONNXRuntimeDetector(output_file, model.CLASSES, 0) if dynamic_export: # scale up to test dynamic shape h, w = [int((_ * 1.5) // 32 * 32) for _ in input_shape[2:]] h, w = min(1344, h), min(1344, w) input_config['input_shape'] = (1, 3, h, w) if test_img is None: input_config['input_path'] = input_img # prepare input once again one_img, one_meta = preprocess_example_input(input_config) img_list, img_meta_list = [one_img], [[one_meta]] # get pytorch output with torch.no_grad(): pytorch_results = model( img_list, img_metas=img_meta_list, return_loss=False, rescale=True)[0] img_list = [_.cuda().contiguous() for _ in img_list] if dynamic_export: img_list = img_list + [_.flip(-1).contiguous() for _ in img_list] img_meta_list = img_meta_list * 2 # get onnx output onnx_results = onnx_model( img_list, img_metas=img_meta_list, return_loss=False)[0] # visualize predictions score_thr = 0.3 if show: 
out_file_ort, out_file_pt = None, None else: out_file_ort, out_file_pt = 'show-ort.png', 'show-pt.png' show_img = one_meta['show_img'] model.show_result( show_img, pytorch_results, score_thr=score_thr, show=True, win_name='PyTorch', out_file=out_file_pt) onnx_model.show_result( show_img, onnx_results, score_thr=score_thr, show=True, win_name='ONNXRuntime', out_file=out_file_ort) # compare a part of result if model.with_mask: compare_pairs = list(zip(onnx_results, pytorch_results)) else: compare_pairs = [(onnx_results, pytorch_results)] err_msg = 'The numerical values are different between Pytorch' + \ ' and ONNX, but it does not necessarily mean the' + \ ' exported ONNX model is problematic.' # check the numerical value for onnx_res, pytorch_res in compare_pairs: for o_res, p_res in zip(onnx_res, pytorch_res): np.testing.assert_allclose( o_res, p_res, rtol=1e-03, atol=1e-05, err_msg=err_msg) print('The numerical values are the same between Pytorch and ONNX') def parse_normalize_cfg(test_pipeline): transforms = None for pipeline in test_pipeline: if 'transforms' in pipeline: transforms = pipeline['transforms'] break assert transforms is not None, 'Failed to find `transforms`' norm_config_li = [_ for _ in transforms if _['type'] == 'Normalize'] assert len(norm_config_li) == 1, '`norm_config` should only have one' norm_config = norm_config_li[0] return norm_config def parse_args(): parser = argparse.ArgumentParser( description='Convert MMDetection models to ONNX') parser.add_argument('config', help='test config file path') parser.add_argument('checkpoint', help='checkpoint file') parser.add_argument('--input-img', type=str, help='Images for input') parser.add_argument( '--show', action='store_true', help='Show onnx graph and detection outputs') parser.add_argument('--output-file', type=str, default='tmp.onnx') parser.add_argument('--opset-version', type=int, default=11) parser.add_argument( '--test-img', type=str, default=None, help='Images for test') parser.add_argument( '--dataset', type=str, default='coco', help='Dataset name. This argument is deprecated and will be removed \ in future releases.') parser.add_argument( '--verify', action='store_true', help='verify the onnx model output against pytorch output') parser.add_argument( '--simplify', action='store_true', help='Whether to simplify onnx model.') parser.add_argument( '--shape', type=int, nargs='+', default=[800, 1216], help='input image size') parser.add_argument( '--mean', type=float, nargs='+', default=[123.675, 116.28, 103.53], help='mean value used for preprocess input data.This argument \ is deprecated and will be removed in future releases.') parser.add_argument( '--std', type=float, nargs='+', default=[58.395, 57.12, 57.375], help='variance value used for preprocess input data. ' 'This argument is deprecated and will be removed in future releases.') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='Override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file. If the value to ' 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' 'Note that the quotation marks are necessary and that no white space ' 'is allowed.') parser.add_argument( '--dynamic-export', action='store_true', help='Whether to export onnx with dynamic axis.') parser.add_argument( '--skip-postprocess', action='store_true', help='Whether to export model without post process. Experimental ' 'option. 
We do not guarantee the correctness of the exported ' 'model.') args = parser.parse_args() return args if __name__ == '__main__': args = parse_args() warnings.warn('Arguments like `--mean`, `--std`, `--dataset` would be \ parsed directly from config file and are deprecated and \ will be removed in future releases.') assert args.opset_version == 11, 'MMDet only support opset 11 now' try: from mmcv.onnx.symbolic import register_extra_symbolics except ModuleNotFoundError: raise NotImplementedError('please update mmcv to version>=v1.0.4') register_extra_symbolics(args.opset_version) cfg = Config.fromfile(args.config) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) if args.shape is None: img_scale = cfg.test_pipeline[1]['img_scale'] input_shape = (1, 3, img_scale[1], img_scale[0]) elif len(args.shape) == 1: input_shape = (1, 3, args.shape[0], args.shape[0]) elif len(args.shape) == 2: input_shape = (1, 3) + tuple(args.shape) else: raise ValueError('invalid input shape') # build the model and load checkpoint model = build_model_from_cfg(args.config, args.checkpoint, args.cfg_options) if not args.input_img: args.input_img = osp.join(osp.dirname(__file__), '../../demo/demo.jpg') normalize_cfg = parse_normalize_cfg(cfg.test_pipeline) # convert model to onnx file pytorch2onnx( model, args.input_img, input_shape, normalize_cfg, opset_version=args.opset_version, show=args.show, output_file=args.output_file, verify=args.verify, test_img=args.test_img, do_simplify=args.simplify, dynamic_export=args.dynamic_export, skip_postprocess=args.skip_postprocess) # Following strings of text style are from colorama package bright_style, reset_style = '\x1b[1m', '\x1b[0m' red_text, blue_text = '\x1b[31m', '\x1b[34m' white_background = '\x1b[107m' msg = white_background + bright_style + red_text msg += 'DeprecationWarning: This tool will be deprecated in future. ' msg += blue_text + 'Welcome to use the unified model deployment toolbox ' msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' msg += reset_style warnings.warn(msg) ================================================ FILE: DLTA_AI_app/mmdetection/tools/deployment/test.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import argparse import warnings import mmcv from mmcv import Config, DictAction from mmcv.parallel import MMDataParallel from mmdet.apis import single_gpu_test from mmdet.datasets import (build_dataloader, build_dataset, replace_ImageToTensor) from mmdet.utils import compat_cfg def parse_args(): parser = argparse.ArgumentParser( description='MMDet test (and eval) an ONNX model using ONNXRuntime') parser.add_argument('config', help='test config file path') parser.add_argument('model', help='Input model file') parser.add_argument('--out', help='output result file in pickle format') parser.add_argument( '--format-only', action='store_true', help='Format the output results without perform evaluation. It is' 'useful when you want to format the result to a specific format and ' 'submit it to the test server') parser.add_argument( '--backend', required=True, choices=['onnxruntime', 'tensorrt'], help='Backend for input model to run. 
') parser.add_argument( '--eval', type=str, nargs='+', help='evaluation metrics, which depends on the dataset, e.g., "bbox",' ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC') parser.add_argument('--show', action='store_true', help='show results') parser.add_argument( '--show-dir', help='directory where painted images will be saved') parser.add_argument( '--show-score-thr', type=float, default=0.3, help='score threshold (default: 0.3)') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file. If the value to ' 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' 'Note that the quotation marks are necessary and that no white space ' 'is allowed.') parser.add_argument( '--eval-options', nargs='+', action=DictAction, help='custom options for evaluation, the key-value pair in xxx=yyy ' 'format will be kwargs for dataset.evaluate() function') args = parser.parse_args() return args def main(): args = parse_args() assert args.out or args.eval or args.format_only or args.show \ or args.show_dir, \ ('Please specify at least one operation (save/eval/format/show the ' 'results / save the results) with the argument "--out", "--eval"' ', "--format-only", "--show" or "--show-dir"') if args.eval and args.format_only: raise ValueError('--eval and --format_only cannot be both specified') if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): raise ValueError('The output file must be a pkl file.') cfg = Config.fromfile(args.config) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) cfg = compat_cfg(cfg) # in case the test dataset is concatenated samples_per_gpu = 1 if isinstance(cfg.data.test, dict): cfg.data.test.test_mode = True samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1) if samples_per_gpu > 1: # Replace 'ImageToTensor' to 'DefaultFormatBundle' cfg.data.test.pipeline = replace_ImageToTensor( cfg.data.test.pipeline) elif isinstance(cfg.data.test, list): for ds_cfg in cfg.data.test: ds_cfg.test_mode = True samples_per_gpu = max( [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test]) if samples_per_gpu > 1: for ds_cfg in cfg.data.test: ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline) # build the dataloader dataset = build_dataset(cfg.data.test) data_loader = build_dataloader( dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=False, shuffle=False) if args.backend == 'onnxruntime': from mmdet.core.export.model_wrappers import ONNXRuntimeDetector model = ONNXRuntimeDetector( args.model, class_names=dataset.CLASSES, device_id=0) elif args.backend == 'tensorrt': from mmdet.core.export.model_wrappers import TensorRTDetector model = TensorRTDetector( args.model, class_names=dataset.CLASSES, device_id=0) model = MMDataParallel(model, device_ids=[0]) outputs = single_gpu_test(model, data_loader, args.show, args.show_dir, args.show_score_thr) if args.out: print(f'\nwriting results to {args.out}') mmcv.dump(outputs, args.out) kwargs = {} if args.eval_options is None else args.eval_options if args.format_only: dataset.format_results(outputs, **kwargs) if args.eval: eval_kwargs = cfg.get('evaluation', {}).copy() # hard-code way to remove EvalHook args for key in [ 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule' ]: eval_kwargs.pop(key, None) 
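# (the keys popped above are EvalHook scheduling options carried in the
# config's `evaluation` dict; they are stripped because dataset.evaluate()
# does not accept them as kwargs)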
eval_kwargs.update(dict(metric=args.eval, **kwargs)) print(dataset.evaluate(outputs, **eval_kwargs)) if __name__ == '__main__': main() # Following strings of text style are from colorama package bright_style, reset_style = '\x1b[1m', '\x1b[0m' red_text, blue_text = '\x1b[31m', '\x1b[34m' white_background = '\x1b[107m' msg = white_background + bright_style + red_text msg += 'DeprecationWarning: This tool will be deprecated in future. ' msg += blue_text + 'Welcome to use the unified model deployment toolbox ' msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' msg += reset_style warnings.warn(msg) ================================================ FILE: DLTA_AI_app/mmdetection/tools/deployment/test_torchserver.py ================================================ from argparse import ArgumentParser import numpy as np import requests from mmdet.apis import inference_detector, init_detector, show_result_pyplot from mmdet.core import bbox2result def parse_args(): parser = ArgumentParser() parser.add_argument('img', help='Image file') parser.add_argument('config', help='Config file') parser.add_argument('checkpoint', help='Checkpoint file') parser.add_argument('model_name', help='The model name in the server') parser.add_argument( '--inference-addr', default='127.0.0.1:8080', help='Address and port of the inference server') parser.add_argument( '--device', default='cuda:0', help='Device used for inference') parser.add_argument( '--score-thr', type=float, default=0.5, help='bbox score threshold') args = parser.parse_args() return args def parse_result(input, model_class): bbox = [] label = [] score = [] for anchor in input: bbox.append(anchor['bbox']) label.append(model_class.index(anchor['class_name'])) score.append([anchor['score']]) bboxes = np.append(bbox, score, axis=1) labels = np.array(label) result = bbox2result(bboxes, labels, len(model_class)) return result def main(args): # build the model from a config file and a checkpoint file model = init_detector(args.config, args.checkpoint, device=args.device) # test a single image model_result = inference_detector(model, args.img) for i, anchor_set in enumerate(model_result): anchor_set = anchor_set[anchor_set[:, 4] >= 0.5] model_result[i] = anchor_set # show the results show_result_pyplot( model, args.img, model_result, score_thr=args.score_thr, title='pytorch_result') url = 'http://' + args.inference_addr + '/predictions/' + args.model_name with open(args.img, 'rb') as image: response = requests.post(url, image) server_result = parse_result(response.json(), model.CLASSES) show_result_pyplot( model, args.img, server_result, score_thr=args.score_thr, title='server_result') for i in range(len(model.CLASSES)): assert np.allclose(model_result[i], server_result[i]) if __name__ == '__main__': args = parse_args() main(args) ================================================ FILE: DLTA_AI_app/mmdetection/tools/dist_test.sh ================================================ #!/usr/bin/env bash CONFIG=$1 CHECKPOINT=$2 GPUS=$3 NNODES=${NNODES:-1} NODE_RANK=${NODE_RANK:-0} PORT=${PORT:-29500} MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ python -m torch.distributed.launch \ --nnodes=$NNODES \ --node_rank=$NODE_RANK \ --master_addr=$MASTER_ADDR \ --nproc_per_node=$GPUS \ --master_port=$PORT \ $(dirname "$0")/test.py \ $CONFIG \ $CHECKPOINT \ --launcher pytorch \ ${@:4} ================================================ FILE: DLTA_AI_app/mmdetection/tools/dist_train.sh ================================================ 
#!/usr/bin/env bash

CONFIG=$1
GPUS=$2
NNODES=${NNODES:-1}
NODE_RANK=${NODE_RANK:-0}
PORT=${PORT:-29500}
MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}

PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
python -m torch.distributed.launch \
    --nnodes=$NNODES \
    --node_rank=$NODE_RANK \
    --master_addr=$MASTER_ADDR \
    --nproc_per_node=$GPUS \
    --master_port=$PORT \
    $(dirname "$0")/train.py \
    $CONFIG \
    --seed 0 \
    --launcher pytorch ${@:3}


================================================
FILE: DLTA_AI_app/mmdetection/tools/misc/browse_dataset.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections.abc import Sequence
from pathlib import Path

import mmcv
import numpy as np
from mmcv import Config, DictAction

from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_dataset
from mmdet.utils import replace_cfg_vals, update_data_root


def parse_args():
    parser = argparse.ArgumentParser(description='Browse a dataset')
    parser.add_argument('config', help='train config file path')
    parser.add_argument(
        '--skip-type',
        type=str,
        nargs='+',
        default=['DefaultFormatBundle', 'Normalize', 'Collect'],
        help='skip some useless pipeline')
    parser.add_argument(
        '--output-dir',
        default=None,
        type=str,
        help='If there is no display interface, you can save it')
    parser.add_argument('--not-show', default=False, action='store_true')
    parser.add_argument(
        '--show-interval',
        type=float,
        default=2,
        help='the interval of show (s)')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    args = parser.parse_args()
    return args


def retrieve_data_cfg(config_path, skip_type, cfg_options):

    def skip_pipeline_steps(config):
        config['pipeline'] = [
            x for x in config.pipeline if x['type'] not in skip_type
        ]

    cfg = Config.fromfile(config_path)

    # replace the ${key} with the value of cfg.key
    cfg = replace_cfg_vals(cfg)

    # update data root according to MMDET_DATASETS
    update_data_root(cfg)

    if cfg_options is not None:
        cfg.merge_from_dict(cfg_options)
    train_data_cfg = cfg.data.train
    while 'dataset' in train_data_cfg and train_data_cfg[
            'type'] != 'MultiImageMixDataset':
        train_data_cfg = train_data_cfg['dataset']

    if isinstance(train_data_cfg, Sequence):
        [skip_pipeline_steps(c) for c in train_data_cfg]
    else:
        skip_pipeline_steps(train_data_cfg)

    return cfg


def main():
    args = parse_args()
    cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)

    if 'gt_semantic_seg' in cfg.train_pipeline[-1]['keys']:
        cfg.data.train.pipeline = [
            p for p in cfg.data.train.pipeline if p['type'] != 'SegRescale'
        ]

    dataset = build_dataset(cfg.data.train)

    progress_bar = mmcv.ProgressBar(len(dataset))

    for item in dataset:
        filename = os.path.join(args.output_dir,
                                Path(item['filename']).name
                                ) if args.output_dir is not None else None

        gt_bboxes = item['gt_bboxes']
        gt_labels = item['gt_labels']
        gt_masks = item.get('gt_masks', None)
        if gt_masks is not None:
            gt_masks = mask2ndarray(gt_masks)

        gt_seg = item.get('gt_semantic_seg', None)
        if gt_seg is not None:
            pad_value = 255  # the padding value of gt_seg
            sem_labels = np.unique(gt_seg)
            all_labels = np.concatenate((gt_labels, sem_labels), axis=0)
            all_labels, counts = np.unique(all_labels, return_counts=True)
            stuff_labels = all_labels[np.logical_and(counts < 2,
                                                     all_labels != pad_value)]
            stuff_masks = gt_seg[None] == stuff_labels[:, None, None]
            gt_labels = np.concatenate((gt_labels, stuff_labels), axis=0)
            gt_masks = np.concatenate(
                (gt_masks, stuff_masks.astype(np.uint8)), axis=0)
            # If you need to show the bounding boxes,
            # please comment the following line
            gt_bboxes = None

        imshow_det_bboxes(
            item['img'],
            gt_bboxes,
            gt_labels,
            gt_masks,
            class_names=dataset.CLASSES,
            show=not args.not_show,
            wait_time=args.show_interval,
            out_file=filename,
            bbox_color=dataset.PALETTE,
            text_color=(200, 200, 200),
            mask_color=dataset.PALETTE)

        progress_bar.update()


if __name__ == '__main__':
    main()


================================================
FILE: DLTA_AI_app/mmdetection/tools/misc/download_dataset.py
================================================
import argparse
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tarfile import TarFile
from zipfile import ZipFile

import torch


def parse_args():
    parser = argparse.ArgumentParser(
        description='Download datasets for training')
    parser.add_argument(
        '--dataset-name', type=str, help='dataset name', default='coco2017')
    parser.add_argument(
        '--save-dir',
        type=str,
        help='the dir to save dataset',
        default='data/coco')
    parser.add_argument(
        '--unzip',
        action='store_true',
        help='whether unzip dataset or not, zipped files will be saved')
    parser.add_argument(
        '--delete',
        action='store_true',
        help='delete the download zipped files')
    parser.add_argument(
        '--threads', type=int, help='number of threading', default=4)
    args = parser.parse_args()
    return args


def download(url, dir, unzip=True, delete=False, threads=1):

    def download_one(url, dir):
        f = dir / Path(url).name
        if Path(url).is_file():
Path(url).rename(f) elif not f.exists(): print('Downloading {} to {}'.format(url, f)) torch.hub.download_url_to_file(url, f, progress=True) if unzip and f.suffix in ('.zip', '.tar'): print('Unzipping {}'.format(f.name)) if f.suffix == '.zip': ZipFile(f).extractall(path=dir) elif f.suffix == '.tar': TarFile(f).extractall(path=dir) if delete: f.unlink() print('Deleted {}'.format(f)) dir = Path(dir) if threads > 1: pool = ThreadPool(threads) pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) pool.close() pool.join() else: for u in [url] if isinstance(url, (str, Path)) else url: download_one(u, dir) def main(): args = parse_args() path = Path(args.save_dir) if not path.exists(): path.mkdir(parents=True, exist_ok=True) data2url = dict( # TODO: Support for downloading Panoptic Segmentation of COCO coco2017=[ 'http://images.cocodataset.org/zips/train2017.zip', 'http://images.cocodataset.org/zips/val2017.zip', 'http://images.cocodataset.org/zips/test2017.zip', 'http://images.cocodataset.org/annotations/' + 'annotations_trainval2017.zip' ], lvis=[ 'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa 'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_val.json.zip', # noqa ], voc2007=[ 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar', # noqa 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar', # noqa 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCdevkit_08-Jun-2007.tar', # noqa ], ) url = data2url.get(args.dataset_name, None) if url is None: print('Only COCO, VOC, and LVIS are supported for now!') return download( url, dir=path, unzip=args.unzip, delete=args.delete, threads=args.threads) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/misc/gen_coco_panoptic_test_info.py ================================================ import argparse import os.path as osp import mmcv def parse_args(): parser = argparse.ArgumentParser( description='Generate COCO test image information ' 'for COCO panoptic segmentation.') parser.add_argument('data_root', help='Path to COCO annotation directory.') args = parser.parse_args() return args def main(): args = parse_args() data_root = args.data_root val_info = mmcv.load(osp.join(data_root, 'panoptic_val2017.json')) test_old_info = mmcv.load( osp.join(data_root, 'image_info_test-dev2017.json')) # replace categories from image_info_test-dev2017.json # with categories from panoptic_val2017.json which # has attribute `isthing`. test_info = test_old_info test_info.update({'categories': val_info['categories']}) mmcv.dump(test_info, osp.join(data_root, 'panoptic_image_info_test-dev2017.json')) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/misc/get_image_metas.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. """Get test image metas on a specific dataset. Here is an example to run this script. Example: python tools/misc/get_image_metas.py ${CONFIG} \ --out ${OUTPUT FILE NAME} """ import argparse import csv import os.path as osp from multiprocessing import Pool import mmcv from mmcv import Config def parse_args(): parser = argparse.ArgumentParser(description='Collect image metas') parser.add_argument('config', help='Config file path') parser.add_argument( '--out', default='validation-image-metas.pkl', help='The output image metas file name.
The save dir is in the ' 'same directory as `dataset.ann_file` path') parser.add_argument( '--nproc', default=4, type=int, help='number of processes used to get image metas') args = parser.parse_args() return args def get_metas_from_csv_style_ann_file(ann_file): data_infos = [] cp_filename = None with open(ann_file, 'r') as f: reader = csv.reader(f) for i, line in enumerate(reader): if i == 0: continue img_id = line[0] filename = f'{img_id}.jpg' if filename != cp_filename: data_infos.append(dict(filename=filename)) cp_filename = filename return data_infos def get_metas_from_txt_style_ann_file(ann_file): with open(ann_file) as f: lines = f.readlines() i = 0 data_infos = [] while i < len(lines): filename = lines[i].rstrip() data_infos.append(dict(filename=filename)) skip_lines = int(lines[i + 2]) + 3 i += skip_lines return data_infos def get_image_metas(data_info, img_prefix): file_client = mmcv.FileClient(backend='disk') filename = data_info.get('filename', None) if filename is not None: if img_prefix is not None: filename = osp.join(img_prefix, filename) img_bytes = file_client.get(filename) img = mmcv.imfrombytes(img_bytes, flag='color') meta = dict(filename=filename, ori_shape=img.shape) else: raise NotImplementedError('Missing `filename` in data_info') return meta def main(): args = parse_args() assert args.out.endswith('pkl'), 'The output file name must have a pkl suffix' # load config files cfg = Config.fromfile(args.config) ann_file = cfg.data.test.ann_file img_prefix = cfg.data.test.img_prefix print(f'{"-" * 5} Start Processing {"-" * 5}') if ann_file.endswith('csv'): data_infos = get_metas_from_csv_style_ann_file(ann_file) elif ann_file.endswith('txt'): data_infos = get_metas_from_txt_style_ann_file(ann_file) else: suffix = ann_file.split('.')[-1] raise NotImplementedError('File name must have a csv or txt suffix, but ' f'got {suffix}') print(f'Successfully loaded annotation file from {ann_file}') print(f'Processing {len(data_infos)} images...') pool = Pool(args.nproc) # get image metas with multiple processes image_metas = pool.starmap( get_image_metas, zip(data_infos, [img_prefix for _ in range(len(data_infos))]), ) pool.close() # save image metas root_path = cfg.data.test.ann_file.rsplit('/', 1)[0] save_path = osp.join(root_path, args.out) mmcv.dump(image_metas, save_path) print(f'Image meta file saved to: {save_path}') if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/misc/print_config.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import argparse import warnings from mmcv import Config, DictAction from mmdet.utils import replace_cfg_vals, update_data_root def parse_args(): parser = argparse.ArgumentParser(description='Print the whole config') parser.add_argument('config', help='config file path') parser.add_argument( '--options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file (deprecated), ' 'change to --cfg-options instead.') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file. If the value to ' 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g.
key="[(a,b),(c,d)]" ' 'Note that the quotation marks are necessary and that no white space ' 'is allowed.') args = parser.parse_args() if args.options and args.cfg_options: raise ValueError( '--options and --cfg-options cannot be both ' 'specified, --options is deprecated in favor of --cfg-options') if args.options: warnings.warn('--options is deprecated in favor of --cfg-options') args.cfg_options = args.options return args def main(): args = parse_args() cfg = Config.fromfile(args.config) # replace the ${key} with the value of cfg.key cfg = replace_cfg_vals(cfg) # update data root according to MMDET_DATASETS update_data_root(cfg) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) print(f'Config:\n{cfg.pretty_text}') if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/misc/split_coco.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import argparse import os.path as osp import mmcv import numpy as np prog_description = '''K-Fold coco split. To split coco data for semi-supervised object detection: python tools/misc/split_coco.py ''' def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( '--data-root', type=str, help='The data root of coco dataset.', default='./data/coco/') parser.add_argument( '--out-dir', type=str, help='The output directory of coco semi-supervised annotations.', default='./data/coco_semi_annos/') parser.add_argument( '--labeled-percent', type=float, nargs='+', help='The percentage of labeled data in the training set.', default=[1, 2, 5, 10]) parser.add_argument( '--fold', type=int, help='K-fold cross validation for semi-supervised object detection.', default=5) args = parser.parse_args() return args def split_coco(data_root, out_dir, percent, fold): """Split COCO data for Semi-supervised object detection. Args: data_root (str): The data root of coco dataset. out_dir (str): The output directory of coco semi-supervised annotations. percent (float): The percentage of labeled data in the training set. fold (int): The fold of dataset and set as random seed for data split. """ def save_anns(name, images, annotations): sub_anns = dict() sub_anns['images'] = images sub_anns['annotations'] = annotations sub_anns['licenses'] = anns['licenses'] sub_anns['categories'] = anns['categories'] sub_anns['info'] = anns['info'] mmcv.mkdir_or_exist(out_dir) mmcv.dump(sub_anns, f'{out_dir}/{name}.json') # set random seed with the fold np.random.seed(fold) ann_file = osp.join(data_root, 'annotations/instances_train2017.json') anns = mmcv.load(ann_file) image_list = anns['images'] labeled_total = int(percent / 100. 
* len(image_list)) labeled_inds = set( np.random.choice(range(len(image_list)), size=labeled_total)) labeled_ids, labeled_images, unlabeled_images = [], [], [] for i in range(len(image_list)): if i in labeled_inds: labeled_images.append(image_list[i]) labeled_ids.append(image_list[i]['id']) else: unlabeled_images.append(image_list[i]) # get all annotations of labeled images labeled_ids = set(labeled_ids) labeled_annotations, unlabeled_annotations = [], [] for ann in anns['annotations']: if ann['image_id'] in labeled_ids: labeled_annotations.append(ann) else: unlabeled_annotations.append(ann) # save labeled and unlabeled labeled_name = f'instances_train2017.{fold}@{percent}' unlabeled_name = f'instances_train2017.{fold}@{percent}-unlabeled' save_anns(labeled_name, labeled_images, labeled_annotations) save_anns(unlabeled_name, unlabeled_images, unlabeled_annotations) def multi_wrapper(args): return split_coco(*args) if __name__ == '__main__': args = parse_args() arguments_list = [(args.data_root, args.out_dir, p, f) for f in range(1, args.fold + 1) for p in args.labeled_percent] mmcv.track_parallel_progress(multi_wrapper, arguments_list, args.fold) ================================================ FILE: DLTA_AI_app/mmdetection/tools/model_converters/detectron2pytorch.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import argparse from collections import OrderedDict import mmcv import torch arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)} def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names): # detectron replace bn with affine channel layer state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name + '_b']) state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name + '_s']) bn_size = state_dict[torch_name + '.weight'].size() state_dict[torch_name + '.running_mean'] = torch.zeros(bn_size) state_dict[torch_name + '.running_var'] = torch.ones(bn_size) converted_names.add(caffe_name + '_b') converted_names.add(caffe_name + '_s') def convert_conv_fc(blobs, state_dict, caffe_name, torch_name, converted_names): state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name + '_w']) converted_names.add(caffe_name + '_w') if caffe_name + '_b' in blobs: state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name + '_b']) converted_names.add(caffe_name + '_b') def convert(src, dst, depth): """Convert keys in detectron pretrained ResNet models to pytorch style.""" # load arch_settings if depth not in arch_settings: raise ValueError('Only support ResNet-50 and ResNet-101 currently') block_nums = arch_settings[depth] # load caffe model caffe_model = mmcv.load(src, encoding='latin1') blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model # convert to pytorch style state_dict = OrderedDict() converted_names = set() convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names) convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names) for i in range(1, len(block_nums) + 1): for j in range(block_nums[i - 1]): if j == 0: convert_conv_fc(blobs, state_dict, f'res{i + 1}_{j}_branch1', f'layer{i}.{j}.downsample.0', converted_names) convert_bn(blobs, state_dict, f'res{i + 1}_{j}_branch1_bn', f'layer{i}.{j}.downsample.1', converted_names) for k, letter in enumerate(['a', 'b', 'c']): convert_conv_fc(blobs, state_dict, f'res{i + 1}_{j}_branch2{letter}', f'layer{i}.{j}.conv{k+1}', converted_names) convert_bn(blobs, state_dict, f'res{i + 1}_{j}_branch2{letter}_bn', 
f'layer{i}.{j}.bn{k + 1}', converted_names) # check if all layers are converted for key in blobs: if key not in converted_names: print(f'Not converted: {key}') # save checkpoint checkpoint = dict() checkpoint['state_dict'] = state_dict torch.save(checkpoint, dst) def main(): parser = argparse.ArgumentParser(description='Convert model keys') parser.add_argument('src', help='src detectron model path') parser.add_argument('dst', help='save path') parser.add_argument('depth', type=int, help='ResNet model depth') args = parser.parse_args() convert(args.src, args.dst, args.depth) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/model_converters/publish_model.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import argparse import subprocess import torch def parse_args(): parser = argparse.ArgumentParser( description='Process a checkpoint to be published') parser.add_argument('in_file', help='input checkpoint filename') parser.add_argument('out_file', help='output checkpoint filename') args = parser.parse_args() return args def process_checkpoint(in_file, out_file): checkpoint = torch.load(in_file, map_location='cpu') # remove optimizer for smaller file size if 'optimizer' in checkpoint: del checkpoint['optimizer'] # if it is necessary to remove some sensitive data in checkpoint['meta'], # add the code here. if torch.__version__ >= '1.6': torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False) else: torch.save(checkpoint, out_file) sha = subprocess.check_output(['sha256sum', out_file]).decode() if out_file.endswith('.pth'): out_file_name = out_file[:-4] else: out_file_name = out_file final_file = out_file_name + f'-{sha[:8]}.pth' subprocess.Popen(['mv', out_file, final_file]) def main(): args = parse_args() process_checkpoint(args.in_file, args.out_file) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/model_converters/regnet2mmdet.py ================================================ # Copyright (c) OpenMMLab. All rights reserved.
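# A hedged sketch of the key renaming this converter performs; the keys below
# are illustrative examples derived from convert_stem/convert_reslayer further
# down, not taken from a real checkpoint:
#   stem.conv.weight          -> conv1.weight
#   s1.b1.proj.weight         -> layer1.0.downsample.0.weight
#   s2.b3.f.b_bn.running_mean -> layer2.2.bn2.running_mean
# Usage sketch (both paths are placeholders):
#   python tools/model_converters/regnet2mmdet.py pycls_regnet.pth regnet_mmdet.pth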
import argparse from collections import OrderedDict import torch def convert_stem(model_key, model_weight, state_dict, converted_names): new_key = model_key.replace('stem.conv', 'conv1') new_key = new_key.replace('stem.bn', 'bn1') state_dict[new_key] = model_weight converted_names.add(model_key) print(f'Convert {model_key} to {new_key}') def convert_head(model_key, model_weight, state_dict, converted_names): new_key = model_key.replace('head.fc', 'fc') state_dict[new_key] = model_weight converted_names.add(model_key) print(f'Convert {model_key} to {new_key}') def convert_reslayer(model_key, model_weight, state_dict, converted_names): split_keys = model_key.split('.') layer, block, module = split_keys[:3] block_id = int(block[1:]) layer_name = f'layer{int(layer[1:])}' block_name = f'{block_id - 1}' if block_id == 1 and module == 'bn': new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}' elif block_id == 1 and module == 'proj': new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}' elif module == 'f': if split_keys[3] == 'a_bn': module_name = 'bn1' elif split_keys[3] == 'b_bn': module_name = 'bn2' elif split_keys[3] == 'c_bn': module_name = 'bn3' elif split_keys[3] == 'a': module_name = 'conv1' elif split_keys[3] == 'b': module_name = 'conv2' elif split_keys[3] == 'c': module_name = 'conv3' new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}' else: raise ValueError(f'Unsupported conversion of key {model_key}') print(f'Convert {model_key} to {new_key}') state_dict[new_key] = model_weight converted_names.add(model_key) def convert(src, dst): """Convert keys in pycls pretrained RegNet models to mmdet style.""" # load the pycls checkpoint regnet_model = torch.load(src) blobs = regnet_model['model_state'] # convert to pytorch style state_dict = OrderedDict() converted_names = set() for key, weight in blobs.items(): if 'stem' in key: convert_stem(key, weight, state_dict, converted_names) elif 'head' in key: convert_head(key, weight, state_dict, converted_names) elif key.startswith('s'): convert_reslayer(key, weight, state_dict, converted_names) # check if all layers are converted for key in blobs: if key not in converted_names: print(f'not converted: {key}') # save checkpoint checkpoint = dict() checkpoint['state_dict'] = state_dict torch.save(checkpoint, dst) def main(): parser = argparse.ArgumentParser(description='Convert model keys') parser.add_argument('src', help='src regnet model path') parser.add_argument('dst', help='save path') args = parser.parse_args() convert(args.src, args.dst) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/model_converters/selfsup2mmdet.py ================================================ # Copyright (c) OpenMMLab. All rights reserved.
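# A hedged sketch of the key handling in moco_convert below: only weights under
# the query encoder are kept, and the prefix is stripped, e.g. (illustrative key,
# not from a real checkpoint):
#   module.encoder_q.layer1.0.conv1.weight -> layer1.0.conv1.weight
# Usage sketch (paths are placeholders):
#   python tools/model_converters/selfsup2mmdet.py moco_ckpt.pth backbone_mmdet.pth --selfsup moco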
import argparse from collections import OrderedDict import torch def moco_convert(src, dst): """Convert keys in MoCo pretrained models to mmdet style.""" # load the moco checkpoint moco_model = torch.load(src) blobs = moco_model['state_dict'] # convert to pytorch style state_dict = OrderedDict() for k, v in blobs.items(): if not k.startswith('module.encoder_q.'): continue old_k = k k = k.replace('module.encoder_q.', '') state_dict[k] = v print(old_k, '->', k) # save checkpoint checkpoint = dict() checkpoint['state_dict'] = state_dict torch.save(checkpoint, dst) def main(): parser = argparse.ArgumentParser(description='Convert model keys') parser.add_argument('src', help='src selfsup model path') parser.add_argument('dst', help='save path') parser.add_argument( '--selfsup', type=str, choices=['moco', 'swav'], help='the self-supervised method of the source model') args = parser.parse_args() if args.selfsup == 'moco': moco_convert(args.src, args.dst) elif args.selfsup == 'swav': print('SWAV does not need to convert the keys') if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/model_converters/upgrade_model_version.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. import argparse import re import tempfile from collections import OrderedDict import torch from mmcv import Config def is_head(key): valid_head_list = [ 'bbox_head', 'mask_head', 'semantic_head', 'grid_head', 'mask_iou_head' ] return any(key.startswith(h) for h in valid_head_list) def parse_config(config_strings): temp_file = tempfile.NamedTemporaryFile() config_path = f'{temp_file.name}.py' with open(config_path, 'w') as f: f.write(config_strings) config = Config.fromfile(config_path) is_two_stage = True is_ssd = False is_retina = False reg_cls_agnostic = False if 'rpn_head' not in config.model: is_two_stage = False # check whether it is SSD if config.model.bbox_head.type == 'SSDHead': is_ssd = True elif config.model.bbox_head.type == 'RetinaHead': is_retina = True elif isinstance(config.model['bbox_head'], list): reg_cls_agnostic = True elif 'reg_class_agnostic' in config.model.bbox_head: reg_cls_agnostic = config.model.bbox_head \ .reg_class_agnostic temp_file.close() return is_two_stage, is_ssd, is_retina, reg_cls_agnostic def reorder_cls_channel(val, num_classes=81): # bias if val.dim() == 1: new_val = torch.cat((val[1:], val[:1]), dim=0) # weight else: out_channels, in_channels = val.shape[:2] # conv_cls for softmax output if out_channels != num_classes and out_channels % num_classes == 0: new_val = val.reshape(-1, num_classes, in_channels, *val.shape[2:]) new_val = torch.cat((new_val[:, 1:], new_val[:, :1]), dim=1) new_val = new_val.reshape(val.size()) # fc_cls elif out_channels == num_classes: new_val = torch.cat((val[1:], val[:1]), dim=0) # agnostic | retina_cls | rpn_cls else: new_val = val return new_val def truncate_cls_channel(val, num_classes=81): # bias if val.dim() == 1: if val.size(0) % num_classes == 0: new_val = val[:num_classes - 1] else: new_val = val # weight else: out_channels, in_channels = val.shape[:2] # conv_logits if out_channels % num_classes == 0: new_val = val.reshape(num_classes, in_channels, *val.shape[2:])[1:] new_val = new_val.reshape(-1, *val.shape[1:]) # agnostic else: new_val = val return new_val def truncate_reg_channel(val, num_classes=81): # bias if val.dim() == 1: # fc_reg | rpn_reg if val.size(0) % num_classes == 0: new_val = val.reshape(num_classes, -1)[:num_classes - 1] new_val = new_val.reshape(-1) # agnostic
else: new_val = val # weight else: out_channels, in_channels = val.shape[:2] # fc_reg | rpn_reg if out_channels % num_classes == 0: new_val = val.reshape(num_classes, -1, in_channels, *val.shape[2:])[1:] new_val = new_val.reshape(-1, *val.shape[1:]) # agnostic else: new_val = val return new_val def convert(in_file, out_file, num_classes): """Convert keys in checkpoints. There can be some breaking changes during the development of mmdetection, and this tool is used for upgrading checkpoints trained with old versions to the latest one. """ checkpoint = torch.load(in_file) in_state_dict = checkpoint.pop('state_dict') out_state_dict = OrderedDict() meta_info = checkpoint['meta'] is_two_stage, is_ssd, is_retina, reg_cls_agnostic = parse_config( '#' + meta_info['config']) if meta_info['mmdet_version'] <= '0.5.3' and is_retina: upgrade_retina = True else: upgrade_retina = False # MMDetection v2.5.0 unifies the class order in RPN # if the model is trained in version=2.5.0 if meta_info['mmdet_version'] < '2.5.0': upgrade_rpn = True else: upgrade_rpn = False for key, val in in_state_dict.items(): new_key = key new_val = val if is_two_stage and is_head(key): new_key = 'roi_head.{}'.format(key) # classification if upgrade_rpn: m = re.search( r'(conv_cls|retina_cls|rpn_cls|fc_cls|fcos_cls|' r'fovea_cls).(weight|bias)', new_key) else: m = re.search( r'(conv_cls|retina_cls|fc_cls|fcos_cls|' r'fovea_cls).(weight|bias)', new_key) if m is not None: print(f'reorder cls channels of {new_key}') new_val = reorder_cls_channel(val, num_classes) # regression if upgrade_rpn: m = re.search(r'(fc_reg).(weight|bias)', new_key) else: m = re.search(r'(fc_reg|rpn_reg).(weight|bias)', new_key) if m is not None and not reg_cls_agnostic: print(f'truncate regression channels of {new_key}') new_val = truncate_reg_channel(val, num_classes) # mask head m = re.search(r'(conv_logits).(weight|bias)', new_key) if m is not None: print(f'truncate mask prediction channels of {new_key}') new_val = truncate_cls_channel(val, num_classes) m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key) # Legacy issues in RetinaNet since V1.x # Use ConvModule instead of nn.Conv2d in RetinaNet # cls_convs.0.weight -> cls_convs.0.conv.weight if m is not None and upgrade_retina: param = m.groups()[1] new_key = key.replace(param, f'conv.{param}') out_state_dict[new_key] = val print(f'rename the name of {key} to {new_key}') continue m = re.search(r'(cls_convs).\d.(weight|bias)', key) if m is not None and is_ssd: print(f'reorder cls channels of {new_key}') new_val = reorder_cls_channel(val, num_classes) out_state_dict[new_key] = new_val checkpoint['state_dict'] = out_state_dict torch.save(checkpoint, out_file) def main(): parser = argparse.ArgumentParser(description='Upgrade model version') parser.add_argument('in_file', help='input checkpoint file') parser.add_argument('out_file', help='output checkpoint file') parser.add_argument( '--num-classes', type=int, default=81, help='number of classes of the original model') args = parser.parse_args() convert(args.in_file, args.out_file, args.num_classes) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/model_converters/upgrade_ssd_version.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
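# A hedged sketch of the remapping in convert() below: a legacy `extra` layer
# index i is split into neck.extra_layers.<i // 2>.<i % 2>.conv, so, assuming a
# legacy key of the form `<prefix>.extra.3.weight` (illustrative only):
#   <prefix>.extra.3.weight -> neck.extra_layers.1.1.conv.weight
# Usage sketch (paths are placeholders):
#   python tools/model_converters/upgrade_ssd_version.py old_ssd.pth new_ssd.pth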
import argparse import tempfile from collections import OrderedDict import torch from mmcv import Config def parse_config(config_strings): temp_file = tempfile.NamedTemporaryFile() config_path = f'{temp_file.name}.py' with open(config_path, 'w') as f: f.write(config_strings) config = Config.fromfile(config_path) # check whether it is SSD if config.model.bbox_head.type != 'SSDHead': raise AssertionError('This is not a SSD model.') def convert(in_file, out_file): checkpoint = torch.load(in_file) in_state_dict = checkpoint.pop('state_dict') out_state_dict = OrderedDict() meta_info = checkpoint['meta'] parse_config('#' + meta_info['config']) for key, value in in_state_dict.items(): if 'extra' in key: layer_idx = int(key.split('.')[2]) new_key = 'neck.extra_layers.{}.{}.conv.'.format( layer_idx // 2, layer_idx % 2) + key.split('.')[-1] elif 'l2_norm' in key: new_key = 'neck.l2_norm.weight' elif 'bbox_head' in key: new_key = key[:21] + '.0' + key[21:] else: new_key = key out_state_dict[new_key] = value checkpoint['state_dict'] = out_state_dict if torch.__version__ >= '1.6': torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False) else: torch.save(checkpoint, out_file) def main(): parser = argparse.ArgumentParser(description='Upgrade SSD version') parser.add_argument('in_file', help='input checkpoint file') parser.add_argument('out_file', help='output checkpoint file') args = parser.parse_args() convert(args.in_file, args.out_file) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/slurm_test.sh ================================================ #!/usr/bin/env bash set -x PARTITION=$1 JOB_NAME=$2 CONFIG=$3 CHECKPOINT=$4 GPUS=${GPUS:-8} GPUS_PER_NODE=${GPUS_PER_NODE:-8} CPUS_PER_TASK=${CPUS_PER_TASK:-5} PY_ARGS=${@:5} SRUN_ARGS=${SRUN_ARGS:-""} PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ srun -p ${PARTITION} \ --job-name=${JOB_NAME} \ --gres=gpu:${GPUS_PER_NODE} \ --ntasks=${GPUS} \ --ntasks-per-node=${GPUS_PER_NODE} \ --cpus-per-task=${CPUS_PER_TASK} \ --kill-on-bad-exit=1 \ ${SRUN_ARGS} \ python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} ================================================ FILE: DLTA_AI_app/mmdetection/tools/slurm_train.sh ================================================ #!/usr/bin/env bash set -x PARTITION=$1 JOB_NAME=$2 CONFIG=$3 WORK_DIR=$4 GPUS=${GPUS:-8} GPUS_PER_NODE=${GPUS_PER_NODE:-8} CPUS_PER_TASK=${CPUS_PER_TASK:-5} SRUN_ARGS=${SRUN_ARGS:-""} PY_ARGS=${@:5} PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ srun -p ${PARTITION} \ --job-name=${JOB_NAME} \ --gres=gpu:${GPUS_PER_NODE} \ --ntasks=${GPUS} \ --ntasks-per-node=${GPUS_PER_NODE} \ --cpus-per-task=${CPUS_PER_TASK} \ --kill-on-bad-exit=1 \ ${SRUN_ARGS} \ python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} ================================================ FILE: DLTA_AI_app/mmdetection/tools/test.py ================================================ # Copyright (c) OpenMMLab. All rights reserved. 
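# Hedged usage sketches for this script, built from the argument parser below;
# config/checkpoint paths are placeholders:
#   single-GPU eval:  python tools/test.py <config>.py <checkpoint>.pth --eval bbox segm
#   save raw results: python tools/test.py <config>.py <checkpoint>.pth --out results.pkl
#   Slurm launch (see tools/slurm_test.sh above):
#     GPUS=8 ./tools/slurm_test.sh <partition> <job_name> <config>.py <checkpoint>.pth --eval segm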
import argparse import os import os.path as osp import time import warnings import mmcv import torch from mmcv import Config, DictAction from mmcv.cnn import fuse_conv_bn from mmcv.runner import (get_dist_info, init_dist, load_checkpoint, wrap_fp16_model) from mmdet.apis import multi_gpu_test, single_gpu_test from mmdet.datasets import (build_dataloader, build_dataset, replace_ImageToTensor) from mmdet.models import build_detector from mmdet.utils import (build_ddp, build_dp, compat_cfg, get_device, replace_cfg_vals, setup_multi_processes, update_data_root) def parse_args(): parser = argparse.ArgumentParser( description='MMDet test (and eval) a model') parser.add_argument('config', help='test config file path') parser.add_argument('checkpoint', help='checkpoint file') parser.add_argument( '--work-dir', help='the directory to save the file containing evaluation metrics') parser.add_argument('--out', help='output result file in pickle format') parser.add_argument( '--fuse-conv-bn', action='store_true', help='Whether to fuse conv and bn, this will slightly increase ' 'the inference speed') parser.add_argument( '--gpu-ids', type=int, nargs='+', help='(Deprecated, please use --gpu-id) ids of gpus to use ' '(only applicable to non-distributed training)') parser.add_argument( '--gpu-id', type=int, default=0, help='id of gpu to use ' '(only applicable to non-distributed testing)') parser.add_argument( '--format-only', action='store_true', help='Format the output results without performing evaluation. It is ' 'useful when you want to format the result to a specific format and ' 'submit it to the test server') parser.add_argument( '--eval', type=str, nargs='+', help='evaluation metrics, which depends on the dataset, e.g., "bbox",' ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC') parser.add_argument('--show', action='store_true', help='show results') parser.add_argument( '--show-dir', help='directory where painted images will be saved') parser.add_argument( '--show-score-thr', type=float, default=0.3, help='score threshold (default: 0.3)') parser.add_argument( '--gpu-collect', action='store_true', help='whether to use gpu to collect results.') parser.add_argument( '--tmpdir', help='tmp directory used for collecting results from multiple ' 'workers, available when gpu-collect is not specified') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file. If the value to ' 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g.
key="[(a,b),(c,d)]" ' 'Note that the quotation marks are necessary and that no white space ' 'is allowed.') parser.add_argument( '--options', nargs='+', action=DictAction, help='custom options for evaluation, the key-value pair in xxx=yyy ' 'format will be kwargs for dataset.evaluate() function (deprecate), ' 'change to --eval-options instead.') parser.add_argument( '--eval-options', nargs='+', action=DictAction, help='custom options for evaluation, the key-value pair in xxx=yyy ' 'format will be kwargs for dataset.evaluate() function') parser.add_argument( '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher') parser.add_argument('--local_rank', type=int, default=0) args = parser.parse_args() if 'LOCAL_RANK' not in os.environ: os.environ['LOCAL_RANK'] = str(args.local_rank) if args.options and args.eval_options: raise ValueError( '--options and --eval-options cannot be both ' 'specified, --options is deprecated in favor of --eval-options') if args.options: warnings.warn('--options is deprecated in favor of --eval-options') args.eval_options = args.options return args def main(): args = parse_args() assert args.out or args.eval or args.format_only or args.show \ or args.show_dir, \ ('Please specify at least one operation (save/eval/format/show the ' 'results / save the results) with the argument "--out", "--eval"' ', "--format-only", "--show" or "--show-dir"') if args.eval and args.format_only: raise ValueError('--eval and --format_only cannot be both specified') if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): raise ValueError('The output file must be a pkl file.') cfg = Config.fromfile(args.config) # replace the ${key} with the value of cfg.key cfg = replace_cfg_vals(cfg) # update data root according to MMDET_DATASETS update_data_root(cfg) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) cfg = compat_cfg(cfg) # set multi-process settings setup_multi_processes(cfg) # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True if 'pretrained' in cfg.model: cfg.model.pretrained = None elif 'init_cfg' in cfg.model.backbone: cfg.model.backbone.init_cfg = None if cfg.model.get('neck'): if isinstance(cfg.model.neck, list): for neck_cfg in cfg.model.neck: if neck_cfg.get('rfp_backbone'): if neck_cfg.rfp_backbone.get('pretrained'): neck_cfg.rfp_backbone.pretrained = None elif cfg.model.neck.get('rfp_backbone'): if cfg.model.neck.rfp_backbone.get('pretrained'): cfg.model.neck.rfp_backbone.pretrained = None if args.gpu_ids is not None: cfg.gpu_ids = args.gpu_ids[0:1] warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. ' 'Because we only support single GPU mode in ' 'non-distributed testing. Use the first GPU ' 'in `gpu_ids` now.') else: cfg.gpu_ids = [args.gpu_id] cfg.device = get_device() # init distributed env first, since logger depends on the dist info. 
if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) test_dataloader_default_args = dict( samples_per_gpu=1, workers_per_gpu=2, dist=distributed, shuffle=False) # in case the test dataset is concatenated if isinstance(cfg.data.test, dict): cfg.data.test.test_mode = True if cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1: # Replace 'ImageToTensor' to 'DefaultFormatBundle' cfg.data.test.pipeline = replace_ImageToTensor( cfg.data.test.pipeline) elif isinstance(cfg.data.test, list): for ds_cfg in cfg.data.test: ds_cfg.test_mode = True if cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1: for ds_cfg in cfg.data.test: ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline) test_loader_cfg = { **test_dataloader_default_args, **cfg.data.get('test_dataloader', {}) } rank, _ = get_dist_info() # the work_dir is optional, so it is only created when requested if args.work_dir is not None and rank == 0: mmcv.mkdir_or_exist(osp.abspath(args.work_dir)) timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) json_file = osp.join(args.work_dir, f'eval_{timestamp}.json') # build the dataloader dataset = build_dataset(cfg.data.test) data_loader = build_dataloader(dataset, **test_loader_cfg) # build the model and load checkpoint cfg.model.train_cfg = None model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) fp16_cfg = cfg.get('fp16', None) if fp16_cfg is None and cfg.get('device', None) == 'npu': fp16_cfg = dict(loss_scale='dynamic') if fp16_cfg is not None: wrap_fp16_model(model) checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') if args.fuse_conv_bn: model = fuse_conv_bn(model) # old versions did not save class info in checkpoints, this workaround is # for backward compatibility if 'CLASSES' in checkpoint.get('meta', {}): model.CLASSES = checkpoint['meta']['CLASSES'] else: model.CLASSES = dataset.CLASSES if not distributed: model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids) outputs = single_gpu_test(model, data_loader, args.show, args.show_dir, args.show_score_thr) else: model = build_ddp( model, cfg.device, device_ids=[int(os.environ['LOCAL_RANK'])], broadcast_buffers=False) # In multi_gpu_test, if tmpdir is None, some tensors # will init on cuda by default, and no device choice is supported. # Init a tmpdir to avoid error on npu here. if cfg.device == 'npu' and args.tmpdir is None: args.tmpdir = './npu_tmpdir' outputs = multi_gpu_test( model, data_loader, args.tmpdir, args.gpu_collect or cfg.evaluation.get('gpu_collect', False)) rank, _ = get_dist_info() if rank == 0: if args.out: print(f'\nwriting results to {args.out}') mmcv.dump(outputs, args.out) kwargs = {} if args.eval_options is None else args.eval_options if args.format_only: dataset.format_results(outputs, **kwargs) if args.eval: eval_kwargs = cfg.get('evaluation', {}).copy() # hard-code way to remove EvalHook args for key in [ 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule', 'dynamic_intervals' ]: eval_kwargs.pop(key, None) eval_kwargs.update(dict(metric=args.eval, **kwargs)) metric = dataset.evaluate(outputs, **eval_kwargs) print(metric) metric_dict = dict(config=args.config, metric=metric) if args.work_dir is not None and rank == 0: mmcv.dump(metric_dict, json_file) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/mmdetection/tools/train.py ================================================ # Copyright (c) OpenMMLab. All rights reserved.
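# Hedged usage sketches for this script, built from the argument parser below;
# paths and override values are placeholders:
#   python tools/train.py <config>.py --work-dir work_dirs/<exp>
#   inline config overrides, e.g.:
#     python tools/train.py <config>.py --cfg-options optimizer.lr=0.01 data.samples_per_gpu=2
#   Slurm launch (see tools/slurm_train.sh above):
#     GPUS=8 ./tools/slurm_train.sh <partition> <job_name> <config>.py work_dirs/<exp>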
import argparse import copy import os import os.path as osp import time import warnings import mmcv import torch import torch.distributed as dist from mmcv import Config, DictAction from mmcv.runner import get_dist_info, init_dist from mmcv.utils import get_git_hash from mmdet import __version__ from mmdet.apis import init_random_seed, set_random_seed, train_detector from mmdet.datasets import build_dataset from mmdet.models import build_detector from mmdet.utils import (collect_env, get_device, get_root_logger, replace_cfg_vals, setup_multi_processes, update_data_root) def parse_args(): parser = argparse.ArgumentParser(description='Train a detector') parser.add_argument('config', help='train config file path') parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument( '--resume-from', help='the checkpoint file to resume from') parser.add_argument( '--auto-resume', action='store_true', help='resume from the latest checkpoint automatically') parser.add_argument( '--no-validate', action='store_true', help='whether not to evaluate the checkpoint during training') group_gpus = parser.add_mutually_exclusive_group() group_gpus.add_argument( '--gpus', type=int, help='(Deprecated, please use --gpu-id) number of gpus to use ' '(only applicable to non-distributed training)') group_gpus.add_argument( '--gpu-ids', type=int, nargs='+', help='(Deprecated, please use --gpu-id) ids of gpus to use ' '(only applicable to non-distributed training)') group_gpus.add_argument( '--gpu-id', type=int, default=0, help='id of gpu to use ' '(only applicable to non-distributed training)') parser.add_argument('--seed', type=int, default=None, help='random seed') parser.add_argument( '--diff-seed', action='store_true', help='Whether or not set different seeds for different ranks') parser.add_argument( '--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.') parser.add_argument( '--options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file (deprecate), ' 'change to --cfg-options instead.') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file. If the value to ' 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' 'Note that the quotation marks are necessary and that no white space ' 'is allowed.') parser.add_argument( '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher') parser.add_argument('--local_rank', type=int, default=0) parser.add_argument( '--auto-scale-lr', action='store_true', help='enable automatically scaling LR.') args = parser.parse_args() if 'LOCAL_RANK' not in os.environ: os.environ['LOCAL_RANK'] = str(args.local_rank) if args.options and args.cfg_options: raise ValueError( '--options and --cfg-options cannot be both ' 'specified, --options is deprecated in favor of --cfg-options') if args.options: warnings.warn('--options is deprecated in favor of --cfg-options') args.cfg_options = args.options return args def main(): args = parse_args() cfg = Config.fromfile(args.config) # replace the ${key} with the value of cfg.key cfg = replace_cfg_vals(cfg) # update data root according to MMDET_DATASETS update_data_root(cfg) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) if args.auto_scale_lr: if 'auto_scale_lr' in cfg and \ 'enable' in cfg.auto_scale_lr and \ 'base_batch_size' in cfg.auto_scale_lr: cfg.auto_scale_lr.enable = True else: warnings.warn('Can not find "auto_scale_lr" or ' '"auto_scale_lr.enable" or ' '"auto_scale_lr.base_batch_size" in your' ' configuration file. Please update all the ' 'configuration files to mmdet >= 2.24.1.') # set multi-process settings setup_multi_processes(cfg) # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True # work_dir is determined in this priority: CLI > segment in file > filename if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None cfg.work_dir = args.work_dir elif cfg.get('work_dir', None) is None: # use config filename as default work_dir if cfg.work_dir is None cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0]) if args.resume_from is not None: cfg.resume_from = args.resume_from cfg.auto_resume = args.auto_resume if args.gpus is not None: cfg.gpu_ids = range(1) warnings.warn('`--gpus` is deprecated because we only support ' 'single GPU mode in non-distributed training. ' 'Use `gpus=1` now.') if args.gpu_ids is not None: cfg.gpu_ids = args.gpu_ids[0:1] warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. ' 'Because we only support single GPU mode in ' 'non-distributed training. Use the first GPU ' 'in `gpu_ids` now.') if args.gpus is None and args.gpu_ids is None: cfg.gpu_ids = [args.gpu_id] # init distributed env first, since logger depends on the dist info. 
if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) # re-set gpu_ids with distributed training mode _, world_size = get_dist_info() cfg.gpu_ids = range(world_size) # create work_dir mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) # dump config cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) # init the logger before other steps timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) log_file = osp.join(cfg.work_dir, f'{timestamp}.log') logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) # init the meta dict to record some important information such as # environment info and seed, which will be logged meta = dict() # log env info env_info_dict = collect_env() env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()]) dash_line = '-' * 60 + '\n' logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line) meta['env_info'] = env_info meta['config'] = cfg.pretty_text # log some basic info logger.info(f'Distributed training: {distributed}') logger.info(f'Config:\n{cfg.pretty_text}') cfg.device = get_device() # set random seeds seed = init_random_seed(args.seed, device=cfg.device) seed = seed + dist.get_rank() if args.diff_seed else seed logger.info(f'Set random seed to {seed}, ' f'deterministic: {args.deterministic}') set_random_seed(seed, deterministic=args.deterministic) cfg.seed = seed meta['seed'] = seed meta['exp_name'] = osp.basename(args.config) model = build_detector( cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg')) model.init_weights() datasets = [build_dataset(cfg.data.train)] if len(cfg.workflow) == 2: assert 'val' in [mode for (mode, _) in cfg.workflow] val_dataset = copy.deepcopy(cfg.data.val) val_dataset.pipeline = cfg.data.train.get( 'pipeline', cfg.data.train.dataset.get('pipeline')) datasets.append(build_dataset(val_dataset)) if cfg.checkpoint_config is not None: # save mmdet version, config file content and class names in # checkpoints as meta data cfg.checkpoint_config.meta = dict( mmdet_version=__version__ + get_git_hash()[:7], CLASSES=datasets[0].CLASSES) # add an attribute for visualization convenience model.CLASSES = datasets[0].CLASSES train_detector( model, datasets, cfg, distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta) if __name__ == '__main__': main() ================================================ FILE: DLTA_AI_app/models_menu/mmscraper.py ================================================ import requests from bs4 import BeautifulSoup import json import requests from tqdm import tqdm url = 'https://raw.githubusercontent.com/open-mmlab/mmdetection/2.x/README.md' page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') # get all ul tags ul_tags = soup.find_all('ul') instance_seg = ul_tags[1] # for the all ul tags, make a dict of each li tag contents and href and append to a list li_tags = instance_seg.find_all('li') li_tags_list = [] for li in li_tags: li_tags_list.append( {'name': li.find('a').text, 'link': "https://github.com/open-mmlab/mmdetection/tree/2.x/" + li.find('a')['href'], }) model_id = 0 col_names = ["id", "Model", "Model Name", "Backbone", "Lr schd", "Memory (GB)", "Inference Time (fps)", "Box AP", "Mask AP", "Config", "Checkpoint_link"] tr_tags_list = [] # ================================================================================================= # Mask R-CNN (ICCV'2017) url = li_tags_list[0]['link'] page = requests.get(url) soup = 
BeautifulSoup(page.content, 'html.parser') # get all tr tags from the first table in the page, for each tr tag, get all td tags and append to a dictionary, append all dictionaries to a list table = soup.find_all('table')[0] tr_tags = table.find_all('tr') for tr in tr_tags[1:]: td_tags = tr.find_all('td') td_tag_dict = {} td_tag_dict["id"] = model_id model_id += 1 td_tag_dict["Model"] = "Mask R-CNN" td_tag_dict["Model Name"] = "Mask R-CNN" for i in range(len(td_tags)): if i == 0: td_tag_dict["Backbone"] = td_tags[i].text elif i == 1: td_tag_dict["Style"] = td_tags[i].text elif i == 2: td_tag_dict["Lr schd"] = td_tags[i].text elif i == 3: td_tag_dict["Memory (GB)"] = td_tags[i].text elif i == 4: td_tag_dict["Inference Time (fps)"] = td_tags[i].text elif i == 5: td_tag_dict["box AP"] = td_tags[i].text elif i == 6: td_tag_dict["mask AP"] = td_tags[i].text elif i == 7: td_tag_dict["Config"] = td_tags[i].find('a')['href'] elif i == 8: td_tag_dict["Checkpoint_link"] = td_tags[i].find('a')['href'] tr_tags_list.append(td_tag_dict) # ================================================================================================= # Cascade Mask R-CNN (CVPR'2018) url = li_tags_list[1]['link'] page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') # get all tr tags from the second table in the page, for each tr tag, get all td tags and append to a dictionary, append all dictionaries to a list table = soup.find_all('table')[1] tr_tags = table.find_all('tr') for tr in tr_tags[1:]: td_tags = tr.find_all('td') td_tag_dict = {} td_tag_dict["id"] = model_id model_id += 1 td_tag_dict["Model"] = "Cascade Mask R-CNN" td_tag_dict["Model Name"] = "Cascade Mask R-CNN" for i in range(len(td_tags)): if i == 0: td_tag_dict["Backbone"] = td_tags[i].text elif i == 1: td_tag_dict["Style"] = td_tags[i].text elif i == 2: td_tag_dict["Lr schd"] = td_tags[i].text elif i == 3: td_tag_dict["Memory (GB)"] = td_tags[i].text elif i == 4: td_tag_dict["Inference Time (fps)"] = td_tags[i].text elif i == 5: td_tag_dict["box AP"] = td_tags[i].text elif i == 6: td_tag_dict["mask AP"] = td_tags[i].text elif i == 7: td_tag_dict["Config"] = td_tags[i].find('a')['href'] elif i == 8: td_tag_dict["Checkpoint_link"] = td_tags[i].find('a')['href'] tr_tags_list.append(td_tag_dict) # ================================================================================================= # Mask Scoring R-CNN (CVPR'2019) url = li_tags_list[2]['link'] page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') # get all tr tags from the second table in the page, for each tr tag, get all td tags and append to a dictionary, append all dictionaries to a list table = soup.find_all('table')[0] tr_tags = table.find_all('tr') for tr in tr_tags[1:]: td_tags = tr.find_all('td') td_tag_dict = {} td_tag_dict["id"] = model_id model_id += 1 td_tag_dict["Model"] = "Mask Scoring R-CNN" td_tag_dict["Model Name"] = "Mask Scoring R-CNN" for i in range(len(td_tags)): if i == 0: td_tag_dict["Backbone"] = td_tags[i].text elif i == 1: td_tag_dict["Style"] = td_tags[i].text elif i == 2: td_tag_dict["Lr schd"] = td_tags[i].text elif i == 3: td_tag_dict["Memory (GB)"] = td_tags[i].text elif i == 4: td_tag_dict["Inference Time (fps)"] = td_tags[i].text elif i == 5: td_tag_dict["box AP"] = td_tags[i].text elif i == 6: td_tag_dict["mask AP"] = td_tags[i].text elif i == 7: td_tag_dict["Config"] = td_tags[i].find('a')['href'] elif i == 8: td_tag_dict["Checkpoint_link"] = td_tags[i].find('a')['href'] tr_tags_list.append(td_tag_dict) # 
=================================================================================================# Hybrid Task Cascade (CVPR'2019) url = li_tags_list[3]['link'] page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') # get all tr tags from the second table in the page, for each tr tag, get all td tags and append to a dictionary, append all dictionaries to a list table = soup.find_all('table')[0] tr_tags = table.find_all('tr') for tr in tr_tags[1:]: td_tags = tr.find_all('td') td_tag_dict = {} td_tag_dict["id"] = model_id model_id += 1 td_tag_dict["Model"] = "Hybrid Task Cascade" td_tag_dict["Model Name"] = "Hybrid Task Cascade" for i in range(len(td_tags)): if i == 0: td_tag_dict["Backbone"] = td_tags[i].text elif i == 1: td_tag_dict["Style"] = td_tags[i].text elif i == 2: td_tag_dict["Lr schd"] = td_tags[i].text elif i == 3: td_tag_dict["Memory (GB)"] = td_tags[i].text elif i == 4: td_tag_dict["Inference Time (fps)"] = td_tags[i].text elif i == 5: td_tag_dict["box AP"] = td_tags[i].text elif i == 6: td_tag_dict["mask AP"] = td_tags[i].text elif i == 7: td_tag_dict["Config"] = td_tags[i].find('a')['href'] elif i == 8: td_tag_dict["Checkpoint_link"] = td_tags[i].find('a')['href'] tr_tags_list.append(td_tag_dict) # # ================================================================================================= # # YOLACT (ICCV'2019) url = li_tags_list[4]['link'] page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') # get all tr tags from the second table in the page, for each tr tag, get all td tags and append to a dictionary, append all dictionaries to a list table = soup.find_all('table')[0] tr_tags = table.find_all('tr') for tr in tr_tags[1:]: td_tags = tr.find_all('td') td_tag_dict = {} td_tag_dict["id"] = model_id model_id += 1 td_tag_dict["Model"] = "YOLACT" td_tag_dict["Model Name"] = "YOLACT" for i in range(len(td_tags)): if i == 2: td_tag_dict["Backbone"] = td_tags[i].text td_tag_dict["Style"] = "-" td_tag_dict["Lr schd"] = "-" td_tag_dict["Memory (GB)"] = "-" elif i == 3: td_tag_dict["Inference Time (fps)"] = td_tags[i].text elif i == 4: td_tag_dict["box AP"] = "-" td_tag_dict["mask AP"] = td_tags[i].text elif i == 6: td_tag_dict["Config"] = td_tags[i].find('a')['href'] elif i == 7: td_tag_dict["Checkpoint_link"] = td_tags[i].find('a')['href'] tr_tags_list.append(td_tag_dict) # remove the model at index 1tr_tags_list.pop(1) # ================================================================================================= # InstaBoost (ICCV'2019) cancelled ❌ requires custom installation # ================================================================================================= # SOLO (ECCV'2020) url = li_tags_list[6]['link'] page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') # get all tr tags from the second table in the page, for each tr tag, get all td tags and append to a dictionary, append all dictionaries to a list tables = soup.find_all('table') for tableno, table in enumerate(tables): tr_tags = table.find_all('tr') for trno, tr in enumerate(tr_tags[1:]): td_tags = tr.find_all('td') td_tag_dict = {} td_tag_dict["id"] = model_id model_id += 1 td_tag_dict["Model"] = "SOLO" if tableno == 0: td_tag_dict["Model Name"] = "SOLO" elif tableno == 1: td_tag_dict["Model Name"] = "Decoupled SOLO" elif tableno == 2: td_tag_dict["Model Name"] = "Decoupled Light SOLO" for i in range(len(td_tags)): if i == 0: td_tag_dict["Backbone"] = td_tags[i].text elif i == 1: td_tag_dict["Style"] = td_tags[i].text elif i == 3: 
td_tag_dict["Lr schd"] = td_tags[i].text elif i == 4: td_tag_dict["Memory (GB)"] = td_tags[i].text elif i == 5: td_tag_dict["Inference Time (fps)"] = td_tags[i].text elif i == 6: td_tag_dict["box AP"] = "-" td_tag_dict["mask AP"] = td_tags[i].text elif i == 7: td_tag_dict["Config"] = "https://github.com/open-mmlab/mmdetection/tree/master/" if trno == 0 and tableno == 0: td_tag_dict["Config"] += "configs/solo/solo_r50_fpn_1x_coco.py" elif trno == 1 and tableno == 0: td_tag_dict["Config"] += "configs/solo/solo_r50_fpn_3x_coco.py" elif trno == 0 and tableno == 1: td_tag_dict["Config"] += "configs/solo/decoupled_solo_r50_fpn_1x_coco.py" elif trno == 1 and tableno == 1: td_tag_dict["Config"] += "configs/solo/decoupled_solo_r50_fpn_3x_coco.py" elif trno == 0 and tableno == 2: td_tag_dict["Config"] += "configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py" td_tag_dict["Checkpoint_link"] = td_tags[i].find('a')['href'] tr_tags_list.append(td_tag_dict) # ================================================================================================= # PointRend (CVPR'2020) cancelled ❌ caffe only # ================================================================================================= # DetectoRS (ArXiv'2020) cancelled ❌ complicated format # ================================================================================================= # SOLOv2 (NeurIPS'2020) url = li_tags_list[9]['link'] page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') # get all tr tags from the second table in the page, for each tr tag, get all td tags and append to a dictionary, append all dictionaries to a list tables = soup.find_all('table') for tableno, table in enumerate(tables): tr_tags = table.find_all('tr') for trno, tr in enumerate(tr_tags[1:]): td_tags = tr.find_all('td') td_tag_dict = {} td_tag_dict["id"] = model_id model_id += 1 td_tag_dict["Model"] = "SOLOv2" if tableno == 0: td_tag_dict["Model Name"] = "SOLOv2" elif tableno == 1: td_tag_dict["Model Name"] = "Light SOLOv2" for i in range(len(td_tags)): if i == 0: td_tag_dict["Backbone"] = td_tags[i].text elif i == 1: td_tag_dict["Style"] = td_tags[i].text elif i == 3: td_tag_dict["Lr schd"] = td_tags[i].text elif i == 4: td_tag_dict["Memory (GB)"] = td_tags[i].text td_tag_dict["Inference Time (fps)"] = "-" td_tag_dict["box AP"] = "-" elif i == 5: td_tag_dict["mask AP"] = td_tags[i].text elif i == 6: td_tag_dict["Config"] = td_tags[i].find('a')['href'] elif i == 7: td_tag_dict["Checkpoint_link"] = td_tags[i].find('a')['href'] tr_tags_list.append(td_tag_dict) # ================================================================================================= # SCNet (AAAI'2021) url = li_tags_list[10]['link'] page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') # get all tr tags from the second table in the page, for each tr tag, get all td tags and append to a dictionary, append all dictionaries to a list table = soup.find_all('table')[0] tr_tags = table.find_all('tr') for tr in tr_tags[1:]: td_tags = tr.find_all('td') td_tag_dict = {} td_tag_dict["id"] = model_id model_id += 1 td_tag_dict["Model"] = "SCNet" td_tag_dict["Model Name"] = "SCNet" for i in range(len(td_tags)): if i == 0: td_tag_dict["Backbone"] = td_tags[i].text elif i == 1: td_tag_dict["Style"] = td_tags[i].text elif i == 2: td_tag_dict["Lr schd"] = td_tags[i].text elif i == 3: td_tag_dict["Memory (GB)"] = td_tags[i].text elif i == 4: td_tag_dict["Inference Time (fps)"] = td_tags[i].text elif i == 5: td_tag_dict["box AP"] = td_tags[i].text elif i == 
6: td_tag_dict["mask AP"] = td_tags[i].text elif i == 9: td_tag_dict["Config"] = td_tags[i].find('a')['href'] elif i == 10: td_tag_dict["Checkpoint_link"] = td_tags[i].find('a')['href'] tr_tags_list.append(td_tag_dict) # ================================================================================================= # QueryInst (ICCV'2021) url = li_tags_list[11]['link'] page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') # get all tr tags from the second table in the page, for each tr tag, get all td tags and append to a dictionary, append all dictionaries to a list table = soup.find_all('table')[0] tr_tags = table.find_all('tr') for tr in tr_tags[1:]: td_tags = tr.find_all('td') td_tag_dict = {} td_tag_dict["id"] = model_id model_id += 1 td_tag_dict["Model"] = "QueryInst" td_tag_dict["Model Name"] = "QueryInst" for i in range(len(td_tags)): if i == 1: td_tag_dict["Backbone"] = td_tags[i].text elif i == 2: td_tag_dict["Style"] = td_tags[i].text elif i == 3: td_tag_dict["Lr schd"] = td_tags[i].text td_tag_dict["Memory (GB)"] = "-" td_tag_dict["Inference Time (fps)"] = "-" elif i == 7: td_tag_dict["box AP"] = td_tags[i].text elif i == 8: td_tag_dict["mask AP"] = td_tags[i].text elif i == 9: td_tag_dict["Config"] = td_tags[i].find('a')['href'] elif i == 10: td_tag_dict["Checkpoint_link"] = td_tags[i].find('a')['href'] tr_tags_list.append(td_tag_dict) # ================================================================================================= # Mask2Former (ArXiv'2021) url = li_tags_list[12]['link'] page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') # get all tr tags from the second table in the page, for each tr tag, get all td tags and append to a dictionary, append all dictionaries to a list table = soup.find_all('table')[1] tr_tags = table.find_all('tr') for tr in tr_tags[1:]: td_tags = tr.find_all('td') td_tag_dict = {} td_tag_dict["id"] = model_id model_id += 1 td_tag_dict["Model"] = "Mask2Former" td_tag_dict["Model Name"] = "Mask2Former" for i in range(len(td_tags)): if i == 0: td_tag_dict["Backbone"] = td_tags[i].text elif i == 1: td_tag_dict["Style"] = td_tags[i].text elif i == 3: td_tag_dict["Lr schd"] = td_tags[i].text elif i == 4: td_tag_dict["Memory (GB)"] = td_tags[i].text elif i == 5: td_tag_dict["Inference Time (fps)"] = td_tags[i].text elif i == 6: td_tag_dict["box AP"] = td_tags[i].text elif i == 7: td_tag_dict["mask AP"] = td_tags[i].text elif i == 8: td_tag_dict["Config"] = td_tags[i].find('a')['href'] elif i == 9: td_tag_dict["Checkpoint_link"] = td_tags[i].find('a')['href'] tr_tags_list.append(td_tag_dict) # # Save the list of dictionaries as a json file tr_tags_list = [x for x in tr_tags_list if x["Style"] != "caffe"] # reid all models id_count = 5 corrupted_models = [] for i in tqdm(range(len(tr_tags_list))): tr_tags_list[i]["id"] = id_count id_count += 1 # replace /open-mmlab/mmdetection/blob/master in config with /mmdetection/configs tr_tags_list[i]["Config"] = tr_tags_list[i]["Config"].replace( "https://github.com/open-mmlab/mmdetection/tree/master", "mmdetection") tr_tags_list[i]["Config"] = tr_tags_list[i]["Config"].replace( "https://github.com/open-mmlab/mmdetection/blob/master", "mmdetection") tr_tags_list[i]["Checkpoint"] = "mmdetection/checkpoints/" + tr_tags_list[i]["Checkpoint_link"].split( "/")[-1] tr_tags_list[i]["Checkpoint Size (MB)"] = round(int(requests.head( tr_tags_list[i]["Checkpoint_link"]).headers.get('Content-Length', 0)) / (1024 * 1024), 2) if tr_tags_list[i]["Checkpoint Size (MB)"] == 0: 
print("Checkpoint size not found for model: ", tr_tags_list[i]["id"]) corrupted_models.append(i) id_count -= 1 tr_tags_list[i].pop("Style", None) # remove corrupted models for i in sorted(corrupted_models, reverse=True): tr_tags_list.pop(i) with open('models_json.json', 'w') as f: json.dump(tr_tags_list, f, indent=4) ================================================ FILE: DLTA_AI_app/models_menu/models_json.json ================================================ [ { "id": 0, "Model": "YOLOv8", "Model Name": "YOLOv8n-seg", "Backbone": "-", "Lr schd": "-", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "36.7", "mask AP": "30.5", "Config": "-", "Checkpoint_link": "https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-seg.pt", "Checkpoint": "mmdetection/checkpoints/yolov8n-seg.pt", "Checkpoint Size (MB)": 6.72 }, { "id": 1, "Model": "YOLOv8", "Model Name": "YOLOv8s-seg", "Backbone": "-", "Lr schd": "-", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "44.6", "mask AP": "36.8", "Config": "-", "Checkpoint_link": "https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-seg.pt", "Checkpoint": "mmdetection/checkpoints/yolov8s-seg.pt", "Checkpoint Size (MB)": 22.7 }, { "id": 2, "Model": "YOLOv8", "Model Name": "YOLOv8m-seg", "Backbone": "-", "Lr schd": "-", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "49.9", "mask AP": "40.8", "Config": "-", "Checkpoint_link": "https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-seg.pt", "Checkpoint": "mmdetection/checkpoints/yolov8m-seg.pt", "Checkpoint Size (MB)": 52.3 }, { "id": 3, "Model": "YOLOv8", "Model Name": "YOLOv8l-seg", "Backbone": "-", "Lr schd": "-", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "52.3", "mask AP": "42.6", "Config": "-", "Checkpoint_link": "https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-seg.pt", "Checkpoint": "mmdetection/checkpoints/yolov8l-seg.pt", "Checkpoint Size (MB)": 88.1 }, { "id": 4, "Model": "YOLOv8", "Model Name": "YOLOv8x-seg", "Backbone": "-", "Lr schd": "-", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "53.4", "mask AP": "43.4", "Config": "-", "Checkpoint_link": "https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-seg.pt", "Checkpoint": "mmdetection/checkpoints/yolov8x-seg.pt", "Checkpoint Size (MB)": 137 }, { "id": 5, "Model": "Mask R-CNN", "Model Name": "Mask R-CNN", "Backbone": "R-50-FPN", "Lr schd": "1x", "Memory (GB)": "4.4", "Inference Time (fps)": "16.1", "box AP": "38.2", "mask AP": "34.7", "Config": "mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth", "Checkpoint": "mmdetection/checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth", "Checkpoint Size (MB)": 169.62 }, { "id": 6, "Model": "Mask R-CNN", "Model Name": "Mask R-CNN", "Backbone": "R-50-FPN (FP16)", "Lr schd": "1x", "Memory (GB)": "3.6", "Inference Time (fps)": "24.1", "box AP": "38.1", "mask AP": "34.7", "Config": "mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth", "Checkpoint": "mmdetection/checkpoints/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth", "Checkpoint Size (MB)": 85.05 }, { "id": 7, "Model": "Mask R-CNN", "Model Name": "Mask R-CNN", "Backbone": 
"R-50-FPN", "Lr schd": "2x", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "39.2", "mask AP": "35.4", "Config": "mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_bbox_mAP-0.392__segm_mAP-0.354_20200505_003907-3e542a40.pth", "Checkpoint": "mmdetection/checkpoints/mask_rcnn_r50_fpn_2x_coco_bbox_mAP-0.392__segm_mAP-0.354_20200505_003907-3e542a40.pth", "Checkpoint Size (MB)": 169.63 }, { "id": 8, "Model": "Mask R-CNN", "Model Name": "Mask R-CNN", "Backbone": "R-101-FPN", "Lr schd": "1x", "Memory (GB)": "6.4", "Inference Time (fps)": "13.5", "box AP": "40.0", "mask AP": "36.1", "Config": "mmdetection/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth", "Checkpoint": "mmdetection/checkpoints/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth", "Checkpoint Size (MB)": 242.32 }, { "id": 9, "Model": "Mask R-CNN", "Model Name": "Mask R-CNN", "Backbone": "R-101-FPN", "Lr schd": "2x", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "40.8", "mask AP": "36.6", "Config": "mmdetection/configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_2x_coco/mask_rcnn_r101_fpn_2x_coco_bbox_mAP-0.408__segm_mAP-0.366_20200505_071027-14b391c7.pth", "Checkpoint": "mmdetection/checkpoints/mask_rcnn_r101_fpn_2x_coco_bbox_mAP-0.408__segm_mAP-0.366_20200505_071027-14b391c7.pth", "Checkpoint Size (MB)": 242.32 }, { "id": 10, "Model": "Mask R-CNN", "Model Name": "Mask R-CNN", "Backbone": "X-101-32x4d-FPN", "Lr schd": "1x", "Memory (GB)": "7.6", "Inference Time (fps)": "11.3", "box AP": "41.9", "mask AP": "37.5", "Config": "mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205-478d0b67.pth", "Checkpoint": "mmdetection/checkpoints/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205-478d0b67.pth", "Checkpoint Size (MB)": 241.03 }, { "id": 11, "Model": "Mask R-CNN", "Model Name": "Mask R-CNN", "Backbone": "X-101-32x4d-FPN", "Lr schd": "2x", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "42.2", "mask AP": "37.8", "Config": "mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco/mask_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.422__segm_mAP-0.378_20200506_004702-faef898c.pth", "Checkpoint": "mmdetection/checkpoints/mask_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.422__segm_mAP-0.378_20200506_004702-faef898c.pth", "Checkpoint Size (MB)": 241.03 }, { "id": 12, "Model": "Mask R-CNN", "Model Name": "Mask R-CNN", "Backbone": "X-101-64x4d-FPN", "Lr schd": "1x", "Memory (GB)": "10.7", "Inference Time (fps)": "8.0", "box AP": "42.8", "mask AP": "38.4", "Config": "mmdetection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201-9352eb0d.pth", "Checkpoint": "mmdetection/checkpoints/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201-9352eb0d.pth", "Checkpoint Size (MB)": 391.11 }, { "id": 13, "Model": "Mask R-CNN", 
"Model Name": "Mask R-CNN", "Backbone": "X-101-64x4d-FPN", "Lr schd": "2x", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "42.7", "mask AP": "38.1", "Config": "mmdetection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco/mask_rcnn_x101_64x4d_fpn_2x_coco_20200509_224208-39d6f70c.pth", "Checkpoint": "mmdetection/checkpoints/mask_rcnn_x101_64x4d_fpn_2x_coco_20200509_224208-39d6f70c.pth", "Checkpoint Size (MB)": 391.11 }, { "id": 14, "Model": "Mask R-CNN", "Model Name": "Mask R-CNN", "Backbone": "X-101-32x8d-FPN", "Lr schd": "1x", "Memory (GB)": "10.6", "Inference Time (fps)": "-", "box AP": "42.8", "mask AP": "38.3", "Config": "mmdetection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco/mask_rcnn_x101_32x8d_fpn_1x_coco_20220630_173841-0aaf329e.pth", "Checkpoint": "mmdetection/checkpoints/mask_rcnn_x101_32x8d_fpn_1x_coco_20220630_173841-0aaf329e.pth", "Checkpoint Size (MB)": 411.48 }, { "id": 15, "Model": "Cascade Mask R-CNN", "Model Name": "Cascade Mask R-CNN", "Backbone": "R-50-FPN", "Lr schd": "1x", "Memory (GB)": "6.0", "Inference Time (fps)": "11.2", "box AP": "41.2", "mask AP": "35.9", "Config": "mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth", "Checkpoint": "mmdetection/checkpoints/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth", "Checkpoint Size (MB)": 295.24 }, { "id": 16, "Model": "Cascade Mask R-CNN", "Model Name": "Cascade Mask R-CNN", "Backbone": "R-50-FPN", "Lr schd": "20e", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "41.9", "mask AP": "36.5", "Config": "mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth", "Checkpoint": "mmdetection/checkpoints/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth", "Checkpoint Size (MB)": 295.25 }, { "id": 17, "Model": "Cascade Mask R-CNN", "Model Name": "Cascade Mask R-CNN", "Backbone": "R-101-FPN", "Lr schd": "1x", "Memory (GB)": "7.9", "Inference Time (fps)": "9.8", "box AP": "42.9", "mask AP": "37.3", "Config": "mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco/cascade_mask_rcnn_r101_fpn_1x_coco_20200203-befdf6ee.pth", "Checkpoint": "mmdetection/checkpoints/cascade_mask_rcnn_r101_fpn_1x_coco_20200203-befdf6ee.pth", "Checkpoint Size (MB)": 367.94 }, { "id": 18, "Model": "Cascade Mask R-CNN", "Model Name": "Cascade Mask R-CNN", "Backbone": "R-101-FPN", "Lr schd": "20e", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "43.4", "mask AP": "37.8", "Config": "mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco/cascade_mask_rcnn_r101_fpn_20e_coco_bbox_mAP-0.434__segm_mAP-0.378_20200504_174836-005947da.pth", "Checkpoint": 
"mmdetection/checkpoints/cascade_mask_rcnn_r101_fpn_20e_coco_bbox_mAP-0.434__segm_mAP-0.378_20200504_174836-005947da.pth", "Checkpoint Size (MB)": 367.95 }, { "id": 19, "Model": "Cascade Mask R-CNN", "Model Name": "Cascade Mask R-CNN", "Backbone": "X-101-32x4d-FPN", "Lr schd": "1x", "Memory (GB)": "9.2", "Inference Time (fps)": "8.6", "box AP": "44.3", "mask AP": "38.3", "Config": "mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201-0f411b1f.pth", "Checkpoint": "mmdetection/checkpoints/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201-0f411b1f.pth", "Checkpoint Size (MB)": 366.65 }, { "id": 20, "Model": "Cascade Mask R-CNN", "Model Name": "Cascade Mask R-CNN", "Backbone": "X-101-32x4d-FPN", "Lr schd": "20e", "Memory (GB)": "9.2", "Inference Time (fps)": "-", "box AP": "45.0", "mask AP": "39.0", "Config": "mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917-ed1f4751.pth", "Checkpoint": "mmdetection/checkpoints/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917-ed1f4751.pth", "Checkpoint Size (MB)": 366.65 }, { "id": 21, "Model": "Cascade Mask R-CNN", "Model Name": "Cascade Mask R-CNN", "Backbone": "X-101-64x4d-FPN", "Lr schd": "1x", "Memory (GB)": "12.2", "Inference Time (fps)": "6.7", "box AP": "45.3", "mask AP": "39.2", "Config": "mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203-9a2db89d.pth", "Checkpoint": "mmdetection/checkpoints/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203-9a2db89d.pth", "Checkpoint Size (MB)": 516.73 }, { "id": 22, "Model": "Cascade Mask R-CNN", "Model Name": "Cascade Mask R-CNN", "Backbone": "X-101-64x4d-FPN", "Lr schd": "20e", "Memory (GB)": "12.2", "Inference Time (fps)": "", "box AP": "45.6", "mask AP": "39.5", "Config": "mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033-bdb5126a.pth", "Checkpoint": "mmdetection/checkpoints/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033-bdb5126a.pth", "Checkpoint Size (MB)": 516.73 }, { "id": 23, "Model": "Mask Scoring R-CNN", "Model Name": "Mask Scoring R-CNN", "Backbone": "R-X101-32x4d", "Lr schd": "2x", "Memory (GB)": "7.9", "Inference Time (fps)": "11.0", "box AP": "41.8", "mask AP": "38.7", "Config": "mmdetection/configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco/ms_rcnn_x101_32x4d_fpn_1x_coco_20200206-81fd1740.pth", "Checkpoint": "mmdetection/checkpoints/ms_rcnn_x101_32x4d_fpn_1x_coco_20200206-81fd1740.pth", "Checkpoint Size (MB)": 303.36 }, { "id": 24, "Model": "Mask Scoring R-CNN", "Model Name": "Mask Scoring R-CNN", "Backbone": "R-X101-64x4d", "Lr schd": "1x", "Memory (GB)": "11.0", "Inference Time (fps)": "8.0", "box AP": "43.0", "mask AP": "39.5", "Config": 
"mmdetection/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco/ms_rcnn_x101_64x4d_fpn_1x_coco_20200206-86ba88d2.pth", "Checkpoint": "mmdetection/checkpoints/ms_rcnn_x101_64x4d_fpn_1x_coco_20200206-86ba88d2.pth", "Checkpoint Size (MB)": 453.44 }, { "id": 25, "Model": "Mask Scoring R-CNN", "Model Name": "Mask Scoring R-CNN", "Backbone": "R-X101-64x4d", "Lr schd": "2x", "Memory (GB)": "11.0", "Inference Time (fps)": "8.0", "box AP": "42.6", "mask AP": "39.5", "Config": "mmdetection/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco/ms_rcnn_x101_64x4d_fpn_2x_coco_20200308-02a445e2.pth", "Checkpoint": "mmdetection/checkpoints/ms_rcnn_x101_64x4d_fpn_2x_coco_20200308-02a445e2.pth", "Checkpoint Size (MB)": 453.44 }, { "id": 26, "Model": "Hybrid Task Cascade", "Model Name": "Hybrid Task Cascade", "Backbone": "R-50-FPN", "Lr schd": "1x", "Memory (GB)": "8.2", "Inference Time (fps)": "5.8", "box AP": "42.3", "mask AP": "37.4", "Config": "mmdetection/configs/htc/htc_r50_fpn_1x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_1x_coco/htc_r50_fpn_1x_coco_20200317-7332cf16.pth", "Checkpoint": "mmdetection/checkpoints/htc_r50_fpn_1x_coco_20200317-7332cf16.pth", "Checkpoint Size (MB)": 306.44 }, { "id": 27, "Model": "Hybrid Task Cascade", "Model Name": "Hybrid Task Cascade", "Backbone": "R-50-FPN", "Lr schd": "20e", "Memory (GB)": "8.2", "Inference Time (fps)": "-", "box AP": "43.3", "mask AP": "38.3", "Config": "mmdetection/configs/htc/htc_r50_fpn_20e_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_20e_coco/htc_r50_fpn_20e_coco_20200319-fe28c577.pth", "Checkpoint": "mmdetection/checkpoints/htc_r50_fpn_20e_coco_20200319-fe28c577.pth", "Checkpoint Size (MB)": 306.44 }, { "id": 28, "Model": "Hybrid Task Cascade", "Model Name": "Hybrid Task Cascade", "Backbone": "R-101-FPN", "Lr schd": "20e", "Memory (GB)": "10.2", "Inference Time (fps)": "5.5", "box AP": "44.8", "mask AP": "39.6", "Config": "mmdetection/configs/htc/htc_r101_fpn_20e_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r101_fpn_20e_coco/htc_r101_fpn_20e_coco_20200317-9b41b48f.pth", "Checkpoint": "mmdetection/checkpoints/htc_r101_fpn_20e_coco_20200317-9b41b48f.pth", "Checkpoint Size (MB)": 379.14 }, { "id": 29, "Model": "Hybrid Task Cascade", "Model Name": "Hybrid Task Cascade", "Backbone": "X-101-32x4d-FPN", "Lr schd": "20e", "Memory (GB)": "11.4", "Inference Time (fps)": "5.0", "box AP": "46.1", "mask AP": "40.5", "Config": "mmdetection/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_32x4d_fpn_16x1_20e_coco/htc_x101_32x4d_fpn_16x1_20e_coco_20200318-de97ae01.pth", "Checkpoint": "mmdetection/checkpoints/htc_x101_32x4d_fpn_16x1_20e_coco_20200318-de97ae01.pth", "Checkpoint Size (MB)": 377.84 }, { "id": 30, "Model": "Hybrid Task Cascade", "Model Name": "Hybrid Task Cascade", "Backbone": "X-101-64x4d-FPN", "Lr schd": "20e", "Memory (GB)": "14.5", "Inference Time (fps)": "4.4", "box AP": "47.0", "mask AP": "41.4", "Config": "mmdetection/configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py", "Checkpoint_link": 
"https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_16x1_20e_coco/htc_x101_64x4d_fpn_16x1_20e_coco_20200318-b181fd7a.pth", "Checkpoint": "mmdetection/checkpoints/htc_x101_64x4d_fpn_16x1_20e_coco_20200318-b181fd7a.pth", "Checkpoint Size (MB)": 527.92 }, { "id": 31, "Model": "YOLACT", "Model Name": "YOLACT", "Backbone": "Resnet50-FPN", "Lr schd": "-", "Memory (GB)": "-", "Inference Time (fps)": "42.5", "box AP": "-", "mask AP": "29.0", "Config": "mmdetection/configs/yolact/yolact_r50_1x8_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth", "Checkpoint": "mmdetection/checkpoints/yolact_r50_1x8_coco_20200908-f38d58df.pth", "Checkpoint Size (MB)": 134.96 }, { "id": 32, "Model": "YOLACT", "Model Name": "YOLACT", "Backbone": "Resnet50-FPN", "Lr schd": "-", "Memory (GB)": "-", "Inference Time (fps)": "42.5", "box AP": "-", "mask AP": "28.4", "Config": "mmdetection/configs/yolact/yolact_r50_8x8_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_8x8_coco/yolact_r50_8x8_coco_20200908-ca34f5db.pth", "Checkpoint": "mmdetection/checkpoints/yolact_r50_8x8_coco_20200908-ca34f5db.pth", "Checkpoint Size (MB)": 134.96 }, { "id": 33, "Model": "YOLACT", "Model Name": "YOLACT", "Backbone": "Resnet101-FPN", "Lr schd": "-", "Memory (GB)": "-", "Inference Time (fps)": "33.5", "box AP": "-", "mask AP": "30.4", "Config": "mmdetection/configs/yolact/yolact_r101_1x8_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r101_1x8_coco/yolact_r101_1x8_coco_20200908-4cbe9101.pth", "Checkpoint": "mmdetection/checkpoints/yolact_r101_1x8_coco_20200908-4cbe9101.pth", "Checkpoint Size (MB)": 207.7 }, { "id": 34, "Model": "SOLO", "Model Name": "SOLO", "Backbone": "R-50", "Lr schd": "1x", "Memory (GB)": "8.0", "Inference Time (fps)": "14.0", "box AP": "-", "mask AP": "33.1", "Config": "mmdetection/configs/solo/solo_r50_fpn_1x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_1x_coco/solo_r50_fpn_1x_coco_20210821_035055-2290a6b8.pth", "Checkpoint": "mmdetection/checkpoints/solo_r50_fpn_1x_coco_20210821_035055-2290a6b8.pth", "Checkpoint Size (MB)": 138.75 }, { "id": 35, "Model": "SOLO", "Model Name": "SOLO", "Backbone": "R-50", "Lr schd": "3x", "Memory (GB)": "7.4", "Inference Time (fps)": "14.0", "box AP": "-", "mask AP": "35.9", "Config": "mmdetection/configs/solo/solo_r50_fpn_3x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_3x_coco/solo_r50_fpn_3x_coco_20210901_012353-11d224d7.pth", "Checkpoint": "mmdetection/checkpoints/solo_r50_fpn_3x_coco_20210901_012353-11d224d7.pth", "Checkpoint Size (MB)": 138.75 }, { "id": 36, "Model": "SOLO", "Model Name": "Decoupled SOLO", "Backbone": "R-50", "Lr schd": "1x", "Memory (GB)": "7.8", "Inference Time (fps)": "12.5", "box AP": "-", "mask AP": "33.9", "Config": "mmdetection/configs/solo/decoupled_solo_r50_fpn_1x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth", "Checkpoint": "mmdetection/checkpoints/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth", "Checkpoint Size (MB)": 152.97 }, { "id": 37, "Model": "SOLO", "Model Name": "Decoupled SOLO", "Backbone": "R-50", "Lr schd": "3x", "Memory (GB)": "7.9", "Inference Time (fps)": "12.5", "box AP": "-", "mask AP": "36.7", 
"Config": "mmdetection/configs/solo/decoupled_solo_r50_fpn_3x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_3x_coco/decoupled_solo_r50_fpn_3x_coco_20210821_042504-7b3301ec.pth", "Checkpoint": "mmdetection/checkpoints/decoupled_solo_r50_fpn_3x_coco_20210821_042504-7b3301ec.pth", "Checkpoint Size (MB)": 152.97 }, { "id": 38, "Model": "SOLO", "Model Name": "Decoupled Light SOLO", "Backbone": "R-50", "Lr schd": "3x", "Memory (GB)": "2.2", "Inference Time (fps)": "31.2", "box AP": "-", "mask AP": "32.9", "Config": "mmdetection/configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_light_r50_fpn_3x_coco/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703-e70e226f.pth", "Checkpoint": "mmdetection/checkpoints/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703-e70e226f.pth", "Checkpoint Size (MB)": 123.69 }, { "id": 39, "Model": "SOLOv2", "Model Name": "SOLOv2", "Backbone": "R-50", "Lr schd": "1x", "Memory (GB)": "5.1", "Inference Time (fps)": "-", "box AP": "-", "mask AP": "34.8", "Config": "mmdetection/configs/solov2/solov2_r50_fpn_1x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth", "Checkpoint": "mmdetection/checkpoints/solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth", "Checkpoint Size (MB)": 178.02 }, { "id": 40, "Model": "SOLOv2", "Model Name": "SOLOv2", "Backbone": "R-50", "Lr schd": "3x", "Memory (GB)": "5.1", "Inference Time (fps)": "-", "box AP": "-", "mask AP": "37.5", "Config": "mmdetection/configs/solov2/solov2_r50_fpn_3x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_3x_coco/solov2_r50_fpn_3x_coco_20220512_125856-fed092d4.pth", "Checkpoint": "mmdetection/checkpoints/solov2_r50_fpn_3x_coco_20220512_125856-fed092d4.pth", "Checkpoint Size (MB)": 178.02 }, { "id": 41, "Model": "SOLOv2", "Model Name": "SOLOv2", "Backbone": "R-101", "Lr schd": "3x", "Memory (GB)": "6.9", "Inference Time (fps)": "-", "box AP": "-", "mask AP": "39.1", "Config": "mmdetection/configs/solov2/solov2_r101_fpn_3x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_fpn_3x_coco/solov2_r101_fpn_3x_coco_20220511_095119-c559a076.pth", "Checkpoint": "mmdetection/checkpoints/solov2_r101_fpn_3x_coco_20220511_095119-c559a076.pth", "Checkpoint Size (MB)": 250.72 }, { "id": 42, "Model": "SOLOv2", "Model Name": "SOLOv2", "Backbone": "R-101(DCN)", "Lr schd": "3x", "Memory (GB)": "7.1", "Inference Time (fps)": "-", "box AP": "-", "mask AP": "41.2", "Config": "mmdetection/configs/solov2/solov2_r101_dcn_fpn_3x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_dcn_fpn_3x_coco/solov2_r101_dcn_fpn_3x_coco_20220513_214734-16c966cb.pth", "Checkpoint": "mmdetection/checkpoints/solov2_r101_dcn_fpn_3x_coco_20220513_214734-16c966cb.pth", "Checkpoint Size (MB)": 262.74 }, { "id": 43, "Model": "SOLOv2", "Model Name": "SOLOv2", "Backbone": "X-101(DCN)", "Lr schd": "3x", "Memory (GB)": "11.3", "Inference Time (fps)": "-", "box AP": "-", "mask AP": "42.4", "Config": "mmdetection/configs/solov2/solov2_x101_dcn_fpn_3x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_x101_dcn_fpn_3x_coco/solov2_x101_dcn_fpn_3x_coco_20220513_214337-aef41095.pth", "Checkpoint": 
"mmdetection/checkpoints/solov2_x101_dcn_fpn_3x_coco_20220513_214337-aef41095.pth", "Checkpoint Size (MB)": 433.59 }, { "id": 44, "Model": "SOLOv2", "Model Name": "Light SOLOv2", "Backbone": "R-18", "Lr schd": "3x", "Memory (GB)": "9.1", "Inference Time (fps)": "-", "box AP": "-", "mask AP": "29.7", "Config": "mmdetection/configs/solov2/solov2_light_r18_fpn_3x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r18_fpn_3x_coco/solov2_light_r18_fpn_3x_coco_20220511_083717-75fa355b.pth", "Checkpoint": "mmdetection/checkpoints/solov2_light_r18_fpn_3x_coco_20220511_083717-75fa355b.pth", "Checkpoint Size (MB)": 69.78 }, { "id": 45, "Model": "SOLOv2", "Model Name": "Light SOLOv2", "Backbone": "R-50", "Lr schd": "3x", "Memory (GB)": "9.9", "Inference Time (fps)": "-", "box AP": "-", "mask AP": "33.7", "Config": "mmdetection/configs/solov2/solov2_light_r50_fpn_3x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r50_fpn_3x_coco/solov2_light_r50_fpn_3x_coco_20220512_165256-c93a6074.pth", "Checkpoint": "mmdetection/checkpoints/solov2_light_r50_fpn_3x_coco_20220512_165256-c93a6074.pth", "Checkpoint Size (MB)": 119.84 }, { "id": 46, "Model": "SCNet", "Model Name": "SCNet", "Backbone": "R-50-FPN", "Lr schd": "1x", "Memory (GB)": "7.0", "Inference Time (fps)": "6.2", "box AP": "43.5", "mask AP": "39.2", "Config": "mmdetection/configs/scnet/scnet_r50_fpn_1x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco-c3f09857.pth", "Checkpoint": "mmdetection/checkpoints/scnet_r50_fpn_1x_coco-c3f09857.pth", "Checkpoint Size (MB)": 361.98 }, { "id": 47, "Model": "SCNet", "Model Name": "SCNet", "Backbone": "R-50-FPN", "Lr schd": "20e", "Memory (GB)": "7.0", "Inference Time (fps)": "6.2", "box AP": "44.5", "mask AP": "40.0", "Config": "mmdetection/configs/scnet/scnet_r50_fpn_20e_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_20e_coco/scnet_r50_fpn_20e_coco-a569f645.pth", "Checkpoint": "mmdetection/checkpoints/scnet_r50_fpn_20e_coco-a569f645.pth", "Checkpoint Size (MB)": 361.98 }, { "id": 48, "Model": "SCNet", "Model Name": "SCNet", "Backbone": "R-101-FPN", "Lr schd": "20e", "Memory (GB)": "8.9", "Inference Time (fps)": "5.8", "box AP": "45.8", "mask AP": "40.9", "Config": "mmdetection/configs/scnet/scnet_r101_fpn_20e_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r101_fpn_20e_coco/scnet_r101_fpn_20e_coco-294e312c.pth", "Checkpoint": "mmdetection/checkpoints/scnet_r101_fpn_20e_coco-294e312c.pth", "Checkpoint Size (MB)": 434.73 }, { "id": 49, "Model": "SCNet", "Model Name": "SCNet", "Backbone": "X-101-64x4d-FPN", "Lr schd": "20e", "Memory (GB)": "13.2", "Inference Time (fps)": "4.9", "box AP": "47.5", "mask AP": "42.3", "Config": "mmdetection/configs/scnet/scnet_x101_64x4d_fpn_20e_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_x101_64x4d_fpn_20e_coco/scnet_x101_64x4d_fpn_20e_coco-fb09dec9.pth", "Checkpoint": "mmdetection/checkpoints/scnet_x101_64x4d_fpn_20e_coco-fb09dec9.pth", "Checkpoint Size (MB)": 583.52 }, { "id": 50, "Model": "QueryInst", "Model Name": "QueryInst", "Backbone": "R-50-FPN", "Lr schd": "1x", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "42.0", "mask AP": "37.5", "Config": "mmdetection/configs/queryinst/queryinst_r50_fpn_1x_coco.py", "Checkpoint_link": 
"https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth", "Checkpoint": "mmdetection/checkpoints/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth", "Checkpoint Size (MB)": 659.15 }, { "id": 51, "Model": "QueryInst", "Model Name": "QueryInst", "Backbone": "R-50-FPN", "Lr schd": "3x", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "44.8", "mask AP": "39.8", "Config": "mmdetection/configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643-7837af86.pth", "Checkpoint": "mmdetection/checkpoints/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643-7837af86.pth", "Checkpoint Size (MB)": 659.16 }, { "id": 52, "Model": "QueryInst", "Model Name": "QueryInst", "Backbone": "R-50-FPN", "Lr schd": "3x", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "47.5", "mask AP": "41.7", "Config": "mmdetection/configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802-85cffbd8.pth", "Checkpoint": "mmdetection/checkpoints/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802-85cffbd8.pth", "Checkpoint Size (MB)": 659.36 }, { "id": 53, "Model": "QueryInst", "Model Name": "QueryInst", "Backbone": "R-101-FPN", "Lr schd": "3x", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "46.4", "mask AP": "41.0", "Config": "mmdetection/configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048-91f9995b.pth", "Checkpoint": "mmdetection/checkpoints/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048-91f9995b.pth", "Checkpoint Size (MB)": 731.85 }, { "id": 54, "Model": "QueryInst", "Model Name": "QueryInst", "Backbone": "R-101-FPN", "Lr schd": "3x", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "49.0", "mask AP": "42.9", "Config": "mmdetection/configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621-76cce59f.pth", "Checkpoint": "mmdetection/checkpoints/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621-76cce59f.pth", "Checkpoint Size (MB)": 732.05 }, { "id": 55, "Model": "Mask2Former", "Model Name": "Mask2Former", "Backbone": "R-50", "Lr schd": "50e", "Memory (GB)": "13.7", "Inference Time (fps)": "-", "box AP": "45.7", "mask AP": "42.9", "Config": "mmdetection/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco/mask2former_r50_lsj_8x2_50e_coco_20220506_191028-8e96e88b.pth", "Checkpoint": "mmdetection/checkpoints/mask2former_r50_lsj_8x2_50e_coco_20220506_191028-8e96e88b.pth", "Checkpoint Size (MB)": 168.3 }, { "id": 56, "Model": "Mask2Former", "Model Name": "Mask2Former", 
"Backbone": "R-101", "Lr schd": "50e", "Memory (GB)": "15.5", "Inference Time (fps)": "-", "box AP": "46.7", "mask AP": "44.0", "Config": "mmdetection/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco/mask2former_r101_lsj_8x2_50e_coco_20220426_100250-c50b6fa6.pth", "Checkpoint": "mmdetection/checkpoints/mask2former_r101_lsj_8x2_50e_coco_20220426_100250-c50b6fa6.pth", "Checkpoint Size (MB)": 241.0 }, { "id": 57, "Model": "Mask2Former", "Model Name": "Mask2Former", "Backbone": "Swin-T", "Lr schd": "50e", "Memory (GB)": "15.3", "Inference Time (fps)": "-", "box AP": "47.7", "mask AP": "44.7", "Config": "mmdetection/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco_20220508_091649-4a943037.pth", "Checkpoint": "mmdetection/checkpoints/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco_20220508_091649-4a943037.pth", "Checkpoint Size (MB)": 181.28 }, { "id": 58, "Model": "Mask2Former", "Model Name": "Mask2Former", "Backbone": "Swin-S", "Lr schd": "50e", "Memory (GB)": "18.8", "Inference Time (fps)": "-", "box AP": "49.3", "mask AP": "46.1", "Config": "mmdetection/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py", "Checkpoint_link": "https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco_20220504_001756-743b7d99.pth", "Checkpoint": "mmdetection/checkpoints/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco_20220504_001756-743b7d99.pth", "Checkpoint Size (MB)": 262.86 }, { "id": 59, "Model": "SAM", "Model Name": "ViT-H SAM model", "Backbone": "ViT-H", "Lr schd": "-", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "-", "mask AP": "-", "Config": "-", "Checkpoint_link": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth", "Checkpoint": "mmdetection/checkpoints/sam_vit_h_4b8939.pth", "Checkpoint Size (MB)": 2445.75 }, { "id": 60, "Model": "SAM", "Model Name": "ViT-L SAM model", "Backbone": "ViT-L", "Lr schd": "-", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "-", "mask AP": "-", "Config": "-", "Checkpoint_link": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth", "Checkpoint": "mmdetection/checkpoints/sam_vit_l_0b3195.pth", "Checkpoint Size (MB)": 1191.64 }, { "id": 61, "Model": "SAM", "Model Name": "ViT-B SAM model", "Backbone": "ViT-B", "Lr schd": "-", "Memory (GB)": "-", "Inference Time (fps)": "-", "box AP": "-", "mask AP": "-", "Config": "-", "Checkpoint_link": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth", "Checkpoint": "mmdetection/checkpoints/sam_vit_b_01ec64.pth", "Checkpoint Size (MB)": 357.67 } ] ================================================ FILE: DLTA_AI_app/models_menu/samScraper.py ================================================ import requests from bs4 import BeautifulSoup import json import requests url = 'https://github.com/facebookresearch/segment-anything/blob/main/README.md' page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') # get all ul inside article tag ul = soup.find('article').find_all('ul') models_json = [] # get all li inside ul li = ul[0].find_all('li') for i in li: model = {} #print(i.find('a').text.split(" ")[0]) # get text inside a tag 
================================================ FILE: DLTA_AI_app/models_menu/sam_models.json ================================================
[
    {
        "name": "vit_h",
        "url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth",
        "checkpoint": "mmdetection/checkpoints/sam_vit_h_4b8939.pth"
    },
    {
        "name": "vit_l",
        "url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth",
        "checkpoint": "mmdetection/checkpoints/sam_vit_l_0b3195.pth"
    },
    {
        "name": "vit_b",
        "url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth",
        "checkpoint": "mmdetection/checkpoints/sam_vit_b_01ec64.pth"
    }
]
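The "name" field above matches the keys of segment-anything's model registry, so an entry can be fed straight into it. A minimal sketch, assuming the segment-anything package is installed and the checkpoint has already been downloaded to the recorded path:

import json
from segment_anything import sam_model_registry, SamPredictor

with open("sam_models.json") as f:
    entry = json.load(f)[2]  # vit_b, the smallest of the three

# "name" doubles as the registry key ("vit_h" / "vit_l" / "vit_b")
sam = sam_model_registry[entry["name"]](checkpoint=entry["checkpoint"])
predictor = SamPredictor(sam)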
================================================ FILE: DLTA_AI_app/setup.py ================================================
from __future__ import print_function

import distutils.spawn
import os
import re
import shlex
import subprocess
import sys

from setuptools import find_packages
from setuptools import setup


def get_version():
    filename = "labelme/__init__.py"
    with open(filename) as f:
        match = re.search(
            r"""^__version__ = ['"]([^'"]*)['"]""", f.read(), re.M
        )
    if not match:
        raise RuntimeError("{} doesn't contain __version__".format(filename))
    version = match.groups()[0]
    return version


def get_install_requires():
    PY3 = sys.version_info[0] == 3
    PY2 = sys.version_info[0] == 2
    assert PY3 or PY2

    install_requires = [
        "imgviz>=0.11,<1.3",
        "matplotlib<3.3",  # for PyInstaller
        "numpy",
        "Pillow>=2.8",
        "PyYAML",
        "PyQt6",
        "termcolor",
    ]

    # Find python binding for qt with priority:
    # PyQt6 -> PySide2 -> PyQt4,
    # and PyQt6 is automatically installed on Python3.
    QT_BINDING = None

    try:
        import PyQt6  # NOQA

        QT_BINDING = "pyqt5"
    except ImportError:
        pass

    if QT_BINDING is None:
        try:
            import PySide2  # NOQA

            QT_BINDING = "pyside2"
        except ImportError:
            pass

    if QT_BINDING is None:
        try:
            import PyQt4  # NOQA

            QT_BINDING = "pyqt4"
        except ImportError:
            if PY2:
                print(
                    "Please install PyQt6, PySide2 or PyQt4 for Python2.\n"
                    "Note that PyQt6 can be installed via pip for Python3.",
                    file=sys.stderr,
                )
                sys.exit(1)
            assert PY3
            # PyQt6 can be installed via pip for Python3
            # 5.15.3, 5.15.4 won't work with PyInstaller
            install_requires.append("PyQt6!=5.15.3,!=5.15.4")
            QT_BINDING = "pyqt5"
    del QT_BINDING

    if os.name == "nt":  # Windows
        install_requires.append("colorama")

    return install_requires


def get_long_description():
    with open("README.md") as f:
        long_description = f.read()
    try:
        import github2pypi

        return github2pypi.replace_url(
            slug="wkentaro/labelme", content=long_description
        )
    except Exception:
        return long_description


def main():
    version = get_version()

    if sys.argv[1] == "release":
        if not distutils.spawn.find_executable("twine"):
            print(
                "Please install twine:\n\n\tpip install twine\n",
                file=sys.stderr,
            )
            sys.exit(1)

        commands = [
            "python tests/docs_tests/man_tests/test_labelme_1.py",
            "git tag v{:s}".format(version),
            "git push origin master --tag",
            "python setup.py sdist",
            "twine upload dist/labelme-{:s}.tar.gz".format(version),
        ]
        for cmd in commands:
            print("+ {:s}".format(cmd))
            subprocess.check_call(shlex.split(cmd))
        sys.exit(0)

    setup(
        name="labelme",
        version=version,
        packages=find_packages(exclude=["github2pypi"]),
        description="Image Polygonal Annotation with Python",
        long_description=get_long_description(),
        long_description_content_type="text/markdown",
        author="Kentaro Wada",
        author_email="www.kentaro.wada@gmail.com",
        url="https://github.com/wkentaro/labelme",
        install_requires=get_install_requires(),
        license="GPLv3",
        keywords="Image Annotation, Machine Learning",
        classifiers=[
            "Development Status :: 5 - Production/Stable",
            "Intended Audience :: Developers",
            "Natural Language :: English",
            "Programming Language :: Python",
            "Programming Language :: Python :: 2.7",
            "Programming Language :: Python :: 3.5",
            "Programming Language :: Python :: 3.6",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: Implementation :: CPython",
            "Programming Language :: Python :: Implementation :: PyPy",
        ],
        package_data={"labelme": ["icons/*", "config/*.yaml"]},
        entry_points={
            "console_scripts": [
                "labelme=labelme.__main__:main",
                "labelme_draw_json=labelme.cli.draw_json:main",
                "labelme_draw_label_png=labelme.cli.draw_label_png:main",
                "labelme_json_to_dataset=labelme.cli.json_to_dataset:main",
                "labelme_on_docker=labelme.cli.on_docker:main",
            ],
        },
        data_files=[("share/man/man1", ["docs/man/labelme.1"])],
    )


if __name__ == "__main__":
    main()

================================================ FILE: DLTA_AI_app/tempCodeRunnerFile.py ================================================
test_vid_1

================================================ FILE: DLTA_AI_app/trackers/__init__.py ================================================

================================================ FILE: DLTA_AI_app/trackers/botsort/basetrack.py ================================================
import numpy as np
from collections import OrderedDict


class TrackState(object):
    New = 0
    Tracked = 1
    Lost = 2
    LongLost = 3
    Removed = 4


class BaseTrack(object):
    _count = 0

    track_id = 0
    is_activated = False
    state = TrackState.New

    history = OrderedDict()
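    # NOTE: everything in this block is a class-level default shared by every
    # track instance until it is shadowed by an instance attribute (STrack,
    # for example, assigns its own features deque in __init__), so these
    # mutable defaults should not be mutated in place.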
    features = []
    curr_feature = None
    score = 0
    start_frame = 0
    frame_id = 0
    time_since_update = 0

    # multi-camera
    location = (np.inf, np.inf)

    @property
    def end_frame(self):
        return self.frame_id

    @staticmethod
    def next_id():
        BaseTrack._count += 1
        return BaseTrack._count

    def activate(self, *args):
        raise NotImplementedError

    def predict(self):
        raise NotImplementedError

    def update(self, *args, **kwargs):
        raise NotImplementedError

    def mark_lost(self):
        self.state = TrackState.Lost

    def mark_long_lost(self):
        self.state = TrackState.LongLost

    def mark_removed(self):
        self.state = TrackState.Removed

    @staticmethod
    def clear_count():
        BaseTrack._count = 0

================================================ FILE: DLTA_AI_app/trackers/botsort/bot_sort.py ================================================
import cv2
import matplotlib.pyplot as plt
import numpy as np
from collections import deque

from trackers.botsort import matching
from trackers.botsort.gmc import GMC
from trackers.botsort.basetrack import BaseTrack, TrackState
from trackers.botsort.kalman_filter import KalmanFilter

# from fast_reid.fast_reid_interfece import FastReIDInterface
from .reid_multibackend import ReIDDetectMultiBackend

from ultralytics.yolo.utils.ops import xyxy2xywh, xywh2xyxy


class STrack(BaseTrack):
    shared_kalman = KalmanFilter()

    def __init__(self, tlwh, score, cls, feat=None, feat_history=50):
        # wait activate
        self._tlwh = np.asarray(tlwh, dtype=np.float32)
        self.kalman_filter = None
        self.mean, self.covariance = None, None
        self.is_activated = False

        self.cls = -1
        self.cls_hist = []  # (cls id, freq)
        self.update_cls(cls, score)

        self.score = score
        self.tracklet_len = 0

        self.smooth_feat = None
        self.curr_feat = None
        # create the per-track feature state before the first update_features
        # call, which appends to the deque
        self.features = deque([], maxlen=feat_history)
        self.alpha = 0.9
        if feat is not None:
            self.update_features(feat)

    def update_features(self, feat):
        # L2-normalise the new embedding, blend it into an exponential moving
        # average (smooth_feat), and keep the raw history in the deque
        feat /= np.linalg.norm(feat)
        self.curr_feat = feat
        if self.smooth_feat is None:
            self.smooth_feat = feat
        else:
            self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
        self.features.append(feat)
        self.smooth_feat /= np.linalg.norm(self.smooth_feat)
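    # update_cls keeps a score-weighted frequency table of the class ids seen
    # for this track and reports the current majority class, which smooths out
    # single-frame classification flips.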
    def update_cls(self, cls, score):
        if len(self.cls_hist) > 0:
            max_freq = 0
            found = False
            for c in self.cls_hist:
                if cls == c[0]:
                    c[1] += score
                    found = True
                if c[1] > max_freq:
                    max_freq = c[1]
                    self.cls = c[0]
            if not found:
                self.cls_hist.append([cls, score])
                self.cls = cls
        else:
            self.cls_hist.append([cls, score])
            self.cls = cls

    def predict(self):
        mean_state = self.mean.copy()
        if self.state != TrackState.Tracked:
            mean_state[6] = 0
            mean_state[7] = 0

        self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)

    @staticmethod
    def multi_predict(stracks):
        if len(stracks) > 0:
            multi_mean = np.asarray([st.mean.copy() for st in stracks])
            multi_covariance = np.asarray([st.covariance for st in stracks])
            for i, st in enumerate(stracks):
                if st.state != TrackState.Tracked:
                    multi_mean[i][6] = 0
                    multi_mean[i][7] = 0
            multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
            for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
                stracks[i].mean = mean
                stracks[i].covariance = cov

    @staticmethod
    def multi_gmc(stracks, H=np.eye(2, 3)):
        # apply a camera-motion compensation warp H to every track state:
        # rotate/scale the 8-dim mean and covariance with R, translate with t
        if len(stracks) > 0:
            multi_mean = np.asarray([st.mean.copy() for st in stracks])
            multi_covariance = np.asarray([st.covariance for st in stracks])

            R = H[:2, :2]
            R8x8 = np.kron(np.eye(4, dtype=float), R)
            t = H[:2, 2]

            for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
                mean = R8x8.dot(mean)
                mean[:2] += t
                cov = R8x8.dot(cov).dot(R8x8.transpose())

                stracks[i].mean = mean
                stracks[i].covariance = cov

    def activate(self, kalman_filter, frame_id):
        """Start a new tracklet"""
        self.kalman_filter = kalman_filter
        self.track_id = self.next_id()

        self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xywh(self._tlwh))

        self.tracklet_len = 0
        self.state = TrackState.Tracked
        if frame_id == 1:
            self.is_activated = True
        self.frame_id = frame_id
        self.start_frame = frame_id

    def re_activate(self, new_track, frame_id, new_id=False):
        self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance,
                                                               self.tlwh_to_xywh(new_track.tlwh))
        if new_track.curr_feat is not None:
            self.update_features(new_track.curr_feat)
        self.tracklet_len = 0
        self.state = TrackState.Tracked
        self.is_activated = True
        self.frame_id = frame_id
        if new_id:
            self.track_id = self.next_id()
        self.score = new_track.score
        self.update_cls(new_track.cls, new_track.score)

    def update(self, new_track, frame_id):
        """
        Update a matched track
        :type new_track: STrack
        :type frame_id: int
        :return:
        """
        self.frame_id = frame_id
        self.tracklet_len += 1

        new_tlwh = new_track.tlwh

        self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance,
                                                               self.tlwh_to_xywh(new_tlwh))

        if new_track.curr_feat is not None:
            self.update_features(new_track.curr_feat)

        self.state = TrackState.Tracked
        self.is_activated = True

        self.score = new_track.score
        self.update_cls(new_track.cls, new_track.score)

    @property
    def tlwh(self):
        """Get current position in bounding box format `(top left x, top left y,
        width, height)`.
        """
        if self.mean is None:
            return self._tlwh.copy()
        ret = self.mean[:4].copy()
        ret[:2] -= ret[2:] / 2
        return ret

    @property
    def tlbr(self):
        """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
        `(top left, bottom right)`.
        """
        ret = self.tlwh.copy()
        ret[2:] += ret[:2]
        return ret

    @property
    def xywh(self):
        """Convert bounding box to format `(center x, center y, width, height)`.
        """
        ret = self.tlwh.copy()
        ret[:2] += ret[2:] / 2.0
        return ret

    @staticmethod
    def tlwh_to_xyah(tlwh):
        """Convert bounding box to format `(center x, center y, aspect ratio,
        height)`, where the aspect ratio is `width / height`.
        """
        ret = np.asarray(tlwh).copy()
        ret[:2] += ret[2:] / 2
        ret[2] /= ret[3]
        return ret

    @staticmethod
    def tlwh_to_xywh(tlwh):
        """Convert bounding box to format `(center x, center y, width, height)`.
""" ret = np.asarray(tlwh).copy() ret[:2] += ret[2:] / 2 return ret def to_xywh(self): return self.tlwh_to_xywh(self.tlwh) @staticmethod def tlbr_to_tlwh(tlbr): ret = np.asarray(tlbr).copy() ret[2:] -= ret[:2] return ret @staticmethod def tlwh_to_tlbr(tlwh): ret = np.asarray(tlwh).copy() ret[2:] += ret[:2] return ret def __repr__(self): return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame) class BoTSORT(object): def __init__(self, model_weights, device, fp16, track_high_thresh:float = 0.45, new_track_thresh:float = 0.6, track_buffer:int = 30, match_thresh:float = 0.8, proximity_thresh:float = 0.5, appearance_thresh:float = 0.25, cmc_method:str = 'sparseOptFlow', frame_rate=30, lambda_=0.985 ): self.tracked_stracks = [] # type: list[STrack] self.lost_stracks = [] # type: list[STrack] self.removed_stracks = [] # type: list[STrack] BaseTrack.clear_count() self.frame_id = 0 self.lambda_ = lambda_ self.track_high_thresh = track_high_thresh self.new_track_thresh = new_track_thresh self.buffer_size = int(frame_rate / 30.0 * track_buffer) self.max_time_lost = self.buffer_size self.kalman_filter = KalmanFilter() # ReID module self.proximity_thresh = proximity_thresh self.appearance_thresh = appearance_thresh self.match_thresh = match_thresh self.model = ReIDDetectMultiBackend(weights=model_weights, device=device, fp16=fp16) self.gmc = GMC(method=cmc_method, verbose=[None,False]) def update(self, output_results, img): self.frame_id += 1 activated_starcks = [] refind_stracks = [] lost_stracks = [] removed_stracks = [] xyxys = output_results[:, 0:4] xywh = xyxy2xywh(xyxys.numpy()) confs = output_results[:, 4] clss = output_results[:, 5] classes = clss.numpy() xyxys = xyxys.numpy() confs = confs.numpy() remain_inds = confs > self.track_high_thresh inds_low = confs > 0.1 inds_high = confs < self.track_high_thresh inds_second = np.logical_and(inds_low, inds_high) dets_second = xywh[inds_second] dets = xywh[remain_inds] scores_keep = confs[remain_inds] scores_second = confs[inds_second] classes_keep = classes[remain_inds] clss_second = classes[inds_second] self.height, self.width = img.shape[:2] '''Extract embeddings ''' features_keep = self._get_features(dets, img) if len(dets) > 0: '''Detections''' detections = [STrack(xyxy, s, c, f.cpu().numpy()) for (xyxy, s, c, f) in zip(dets, scores_keep, classes_keep, features_keep)] else: detections = [] ''' Add newly detected tracklets to tracked_stracks''' unconfirmed = [] tracked_stracks = [] # type: list[STrack] for track in self.tracked_stracks: if not track.is_activated: unconfirmed.append(track) else: tracked_stracks.append(track) ''' Step 2: First association, with high score detection boxes''' strack_pool = joint_stracks(tracked_stracks, self.lost_stracks) # Predict the current location with KF STrack.multi_predict(strack_pool) # Fix camera motion warp = self.gmc.apply(img, dets) STrack.multi_gmc(strack_pool, warp) STrack.multi_gmc(unconfirmed, warp) # Associate with high score detection boxes raw_emb_dists = matching.embedding_distance(strack_pool, detections) dists = matching.fuse_motion(self.kalman_filter, raw_emb_dists, strack_pool, detections, only_position=False, lambda_=self.lambda_) # ious_dists = matching.iou_distance(strack_pool, detections) # ious_dists_mask = (ious_dists > self.proximity_thresh) # ious_dists = matching.fuse_score(ious_dists, detections) # emb_dists = matching.embedding_distance(strack_pool, detections) / 2.0 # raw_emb_dists = emb_dists.copy() # emb_dists[emb_dists > self.appearance_thresh] = 1.0 # 
emb_dists[ious_dists_mask] = 1.0 # dists = np.minimum(ious_dists, emb_dists) # Popular ReID method (JDE / FairMOT) # raw_emb_dists = matching.embedding_distance(strack_pool, detections) # dists = matching.fuse_motion(self.kalman_filter, raw_emb_dists, strack_pool, detections) # emb_dists = dists # IoU making ReID # dists = matching.embedding_distance(strack_pool, detections) # dists[ious_dists_mask] = 1.0 matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.match_thresh) for itracked, idet in matches: track = strack_pool[itracked] det = detections[idet] if track.state == TrackState.Tracked: track.update(detections[idet], self.frame_id) activated_starcks.append(track) else: track.re_activate(det, self.frame_id, new_id=False) refind_stracks.append(track) ''' Step 3: Second association, with low score detection boxes''' # if len(scores): # inds_high = scores < self.track_high_thresh # inds_low = scores > self.track_low_thresh # inds_second = np.logical_and(inds_low, inds_high) # dets_second = bboxes[inds_second] # scores_second = scores[inds_second] # classes_second = classes[inds_second] # else: # dets_second = [] # scores_second = [] # classes_second = [] # association the untrack to the low score detections if len(dets_second) > 0: '''Detections''' detections_second = [STrack(STrack.tlbr_to_tlwh(tlbr), s, c) for (tlbr, s, c) in zip(dets_second, scores_second, clss_second)] else: detections_second = [] r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked] dists = matching.iou_distance(r_tracked_stracks, detections_second) matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5) for itracked, idet in matches: track = r_tracked_stracks[itracked] det = detections_second[idet] if track.state == TrackState.Tracked: track.update(det, self.frame_id) activated_starcks.append(track) else: track.re_activate(det, self.frame_id, new_id=False) refind_stracks.append(track) for it in u_track: track = r_tracked_stracks[it] if not track.state == TrackState.Lost: track.mark_lost() lost_stracks.append(track) '''Deal with unconfirmed tracks, usually tracks with only one beginning frame''' detections = [detections[i] for i in u_detection] ious_dists = matching.iou_distance(unconfirmed, detections) ious_dists_mask = (ious_dists > self.proximity_thresh) ious_dists = matching.fuse_score(ious_dists, detections) emb_dists = matching.embedding_distance(unconfirmed, detections) / 2.0 raw_emb_dists = emb_dists.copy() emb_dists[emb_dists > self.appearance_thresh] = 1.0 emb_dists[ious_dists_mask] = 1.0 dists = np.minimum(ious_dists, emb_dists) matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7) for itracked, idet in matches: unconfirmed[itracked].update(detections[idet], self.frame_id) activated_starcks.append(unconfirmed[itracked]) for it in u_unconfirmed: track = unconfirmed[it] track.mark_removed() removed_stracks.append(track) """ Step 4: Init new stracks""" for inew in u_detection: track = detections[inew] if track.score < self.new_track_thresh: continue track.activate(self.kalman_filter, self.frame_id) activated_starcks.append(track) """ Step 5: Update state""" for track in self.lost_stracks: if self.frame_id - track.end_frame > self.max_time_lost: track.mark_removed() removed_stracks.append(track) """ Merge """ self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked] self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks) 
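        # fold the re-found tracks back in as well; the lost and removed lists
        # are then rebuilt and near-duplicate tracklets pruned below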
        self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)
        self.lost_stracks.extend(lost_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
        self.removed_stracks.extend(removed_stracks)
        self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)

        output_stracks = [track for track in self.tracked_stracks if track.is_activated]
        outputs = []
        for t in output_stracks:
            output = []
            tlwh = t.tlwh
            tid = t.track_id
            tlwh = np.expand_dims(tlwh, axis=0)
            xyxy = xywh2xyxy(tlwh)
            xyxy = np.squeeze(xyxy, axis=0)
            output.extend(xyxy)
            output.append(tid)
            output.append(t.cls)
            output.append(t.score)
            outputs.append(output)

        return outputs

    def _xywh_to_xyxy(self, bbox_xywh):
        x, y, w, h = bbox_xywh
        x1 = max(int(x - w / 2), 0)
        x2 = min(int(x + w / 2), self.width - 1)
        y1 = max(int(y - h / 2), 0)
        y2 = min(int(y + h / 2), self.height - 1)
        return x1, y1, x2, y2

    def _get_features(self, bbox_xywh, ori_img):
        im_crops = []
        for box in bbox_xywh:
            x1, y1, x2, y2 = self._xywh_to_xyxy(box)
            im = ori_img[y1:y2, x1:x2]
            im_crops.append(im)
        if im_crops:
            features = self.model(im_crops)
        else:
            features = np.array([])
        return features


def joint_stracks(tlista, tlistb):
    exists = {}
    res = []
    for t in tlista:
        exists[t.track_id] = 1
        res.append(t)
    for t in tlistb:
        tid = t.track_id
        if not exists.get(tid, 0):
            exists[tid] = 1
            res.append(t)
    return res


def sub_stracks(tlista, tlistb):
    stracks = {}
    for t in tlista:
        stracks[t.track_id] = t
    for t in tlistb:
        tid = t.track_id
        if stracks.get(tid, 0):
            del stracks[tid]
    return list(stracks.values())


def remove_duplicate_stracks(stracksa, stracksb):
    pdist = matching.iou_distance(stracksa, stracksb)
    pairs = np.where(pdist < 0.15)
    dupa, dupb = list(), list()
    for p, q in zip(*pairs):
        timep = stracksa[p].frame_id - stracksa[p].start_frame
        timeq = stracksb[q].frame_id - stracksb[q].start_frame
        if timep > timeq:
            dupb.append(q)
        else:
            dupa.append(p)
    resa = [t for i, t in enumerate(stracksa) if i not in dupa]
    resb = [t for i, t in enumerate(stracksb) if i not in dupb]
    return resa, resb

================================================ FILE: DLTA_AI_app/trackers/botsort/configs/botsort.yaml ================================================
# Trial number: 232
# HOTA, MOTA, IDF1: [45.31]
botsort:
  appearance_thresh: 0.4818211117541298
  cmc_method: sparseOptFlow
  conf_thres: 0.3501265956918775
  frame_rate: 30
  lambda_: 0.9896143462366406
  match_thresh: 0.22734550911325851
  new_track_thresh: 0.21144301345190655
  proximity_thresh: 0.5945380911899254
  track_buffer: 60
  track_high_thresh: 0.33824964456239337

================================================ FILE: DLTA_AI_app/trackers/botsort/gmc.py ================================================
import cv2
import matplotlib.pyplot as plt
import numpy as np
import copy
import time


class GMC:
    def __init__(self, method='sparseOptFlow', downscale=2, verbose=None):
        super(GMC, self).__init__()

        self.method = method
        self.downscale = max(1, int(downscale))

        if self.method == 'orb':
            self.detector = cv2.FastFeatureDetector_create(20)
            self.extractor = cv2.ORB_create()
            self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)

        elif self.method == 'sift':
            self.detector = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
            self.extractor = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
            self.matcher = cv2.BFMatcher(cv2.NORM_L2)

        elif self.method == 'ecc':
            number_of_iterations = 5000
            termination_eps = 1e-6
            self.warp_mode = cv2.MOTION_EUCLIDEAN
            self.criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)

        elif self.method == 'sparseOptFlow':
            self.feature_params = dict(maxCorners=1000, qualityLevel=0.01, minDistance=1, blockSize=3,
                                       useHarrisDetector=False, k=0.04)
            # self.gmc_file = open('GMC_results.txt', 'w')

        elif self.method == 'file' or self.method == 'files':
            seqName = verbose[0]
            ablation = verbose[1]
            if ablation:
                filePath = r'tracker/GMC_files/MOT17_ablation'
            else:
                filePath = r'tracker/GMC_files/MOTChallenge'

            if '-FRCNN' in seqName:
                seqName = seqName[:-6]
            elif '-DPM' in seqName:
                seqName = seqName[:-4]
            elif '-SDP' in seqName:
                seqName = seqName[:-4]

            self.gmcFile = open(filePath + "/GMC-" + seqName + ".txt", 'r')

            if self.gmcFile is None:
                raise ValueError("Error: Unable to open GMC file in directory:" + filePath)

        elif self.method == 'none' or self.method == 'None':
            self.method = 'none'
        else:
            raise ValueError("Error: Unknown CMC method:" + method)

        self.prevFrame = None
        self.prevKeyPoints = None
        self.prevDescriptors = None

        self.initializedFirstFrame = False

    def apply(self, raw_frame, detections=None):
        if self.method == 'orb' or self.method == 'sift':
            return self.applyFeaures(raw_frame, detections)
        elif self.method == 'ecc':
            return self.applyEcc(raw_frame, detections)
        elif self.method == 'sparseOptFlow':
            return self.applySparseOptFlow(raw_frame, detections)
        elif self.method == 'file':
            return self.applyFile(raw_frame, detections)
        elif self.method == 'none':
            return np.eye(2, 3)
        else:
            return np.eye(2, 3)

    def applyEcc(self, raw_frame, detections=None):
        # Initialize
        height, width, _ = raw_frame.shape
        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
        H = np.eye(2, 3, dtype=np.float32)

        # Downscale image (TODO: consider using pyramids)
        if self.downscale > 1.0:
            frame = cv2.GaussianBlur(frame, (3, 3), 1.5)
            frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))
            width = width // self.downscale
            height = height // self.downscale

        # Handle first frame
        if not self.initializedFirstFrame:
            # Initialize data
            self.prevFrame = frame.copy()

            # Initialization done
            self.initializedFirstFrame = True

            return H

        # Run the ECC algorithm. The results are stored in warp_matrix.
        # (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria)
        try:
            (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria, None, 1)
        except Exception:
            print('Warning: find transform failed. Set warp as identity')

        return H

    def applyFeaures(self, raw_frame, detections=None):
        # Initialize
        height, width, _ = raw_frame.shape
        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
        H = np.eye(2, 3)

        # Downscale image (TODO: consider using pyramids)
        if self.downscale > 1.0:
            # frame = cv2.GaussianBlur(frame, (3, 3), 1.5)
            frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))
            width = width // self.downscale
            height = height // self.downscale

        # find the keypoints, masking out image borders and detection boxes
        mask = np.zeros_like(frame)
        # mask[int(0.05 * height): int(0.95 * height), int(0.05 * width): int(0.95 * width)] = 255
        mask[int(0.02 * height): int(0.98 * height), int(0.02 * width): int(0.98 * width)] = 255
        if detections is not None:
            for det in detections:
                tlbr = (det[:4] / self.downscale).astype(np.int_)
                mask[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2]] = 0

        keypoints = self.detector.detect(frame, mask)

        # compute the descriptors
        keypoints, descriptors = self.extractor.compute(frame, keypoints)

        # Handle first frame
        if not self.initializedFirstFrame:
            # Initialize data
            self.prevFrame = frame.copy()
            self.prevKeyPoints = copy.copy(keypoints)
            self.prevDescriptors = copy.copy(descriptors)

            # Initialization done
            self.initializedFirstFrame = True

            return H

        # Match descriptors.
        knnMatches = self.matcher.knnMatch(self.prevDescriptors, descriptors, 2)

        # Filter matches based on smallest spatial distance
        matches = []
        spatialDistances = []

        maxSpatialDistance = 0.25 * np.array([width, height])

        # Handle empty matches case
        if len(knnMatches) == 0:
            # Store to next iteration
            self.prevFrame = frame.copy()
            self.prevKeyPoints = copy.copy(keypoints)
            self.prevDescriptors = copy.copy(descriptors)

            return H

        for m, n in knnMatches:
            if m.distance < 0.9 * n.distance:
                prevKeyPointLocation = self.prevKeyPoints[m.queryIdx].pt
                currKeyPointLocation = keypoints[m.trainIdx].pt

                spatialDistance = (prevKeyPointLocation[0] - currKeyPointLocation[0],
                                   prevKeyPointLocation[1] - currKeyPointLocation[1])

                if (np.abs(spatialDistance[0]) < maxSpatialDistance[0]) and \
                        (np.abs(spatialDistance[1]) < maxSpatialDistance[1]):
                    spatialDistances.append(spatialDistance)
                    matches.append(m)

        meanSpatialDistances = np.mean(spatialDistances, 0)
        stdSpatialDistances = np.std(spatialDistances, 0)

        inliers = (spatialDistances - meanSpatialDistances) < 2.5 * stdSpatialDistances

        goodMatches = []
        prevPoints = []
        currPoints = []
        for i in range(len(matches)):
            if inliers[i, 0] and inliers[i, 1]:
                goodMatches.append(matches[i])
                prevPoints.append(self.prevKeyPoints[matches[i].queryIdx].pt)
                currPoints.append(keypoints[matches[i].trainIdx].pt)

        prevPoints = np.array(prevPoints)
        currPoints = np.array(currPoints)

        # Draw the keypoint matches on the output image (debugging only)
        if 0:
            matches_img = np.hstack((self.prevFrame, frame))
            matches_img = cv2.cvtColor(matches_img, cv2.COLOR_GRAY2BGR)
            W = np.size(self.prevFrame, 1)
            for m in goodMatches:
                prev_pt = np.array(self.prevKeyPoints[m.queryIdx].pt, dtype=np.int_)
                curr_pt = np.array(keypoints[m.trainIdx].pt, dtype=np.int_)
                curr_pt[0] += W
                color = np.random.randint(0, 255, (3,))
                color = (int(color[0]), int(color[1]), int(color[2]))

                matches_img = cv2.line(matches_img, prev_pt, curr_pt, tuple(color), 1, cv2.LINE_AA)
                matches_img = cv2.circle(matches_img, prev_pt, 2, tuple(color), -1)
                matches_img = cv2.circle(matches_img, curr_pt, 2, tuple(color), -1)

            plt.figure()
            plt.imshow(matches_img)
            plt.show()

        # Find rigid matrix
        if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(currPoints, 0)):
            H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)

            # Handle downscale
            if self.downscale > 1.0:
                H[0, 2] *= self.downscale
                H[1, 2] *= self.downscale
        else:
            print('Warning: not enough matching points')

        # Store to next iteration
        self.prevFrame = frame.copy()
        self.prevKeyPoints = copy.copy(keypoints)
        self.prevDescriptors = copy.copy(descriptors)

        return H
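    # applySparseOptFlow estimates global camera motion per frame: track
    # Shi-Tomasi corners (goodFeaturesToTrack) from the previous frame with
    # pyramidal Lucas-Kanade (calcOpticalFlowPyrLK), then fit a partial 2D
    # affine transform to the surviving correspondences with RANSAC.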
cv2.RANSAC) # Handle downscale if self.downscale > 1.0: H[0, 2] *= self.downscale H[1, 2] *= self.downscale else: print('Warning: not enough matching points') # Store to next iteration self.prevFrame = frame.copy() self.prevKeyPoints = copy.copy(keypoints) self.prevDescriptors = copy.copy(descriptors) return H def applySparseOptFlow(self, raw_frame, detections=None): t0 = time.time() # Initialize height, width, _ = raw_frame.shape frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) H = np.eye(2, 3) # Downscale image if self.downscale > 1.0: # frame = cv2.GaussianBlur(frame, (3, 3), 1.5) frame = cv2.resize(frame, (width // self.downscale, height // self.downscale)) # find the keypoints keypoints = cv2.goodFeaturesToTrack(frame, mask=None, **self.feature_params) # Handle first frame if not self.initializedFirstFrame: # Initialize data self.prevFrame = frame.copy() self.prevKeyPoints = copy.copy(keypoints) # Initialization done self.initializedFirstFrame = True return H # find correspondences matchedKeypoints, status, err = cv2.calcOpticalFlowPyrLK(self.prevFrame, frame, self.prevKeyPoints, None) # leave good correspondences only prevPoints = [] currPoints = [] for i in range(len(status)): if status[i]: prevPoints.append(self.prevKeyPoints[i]) currPoints.append(matchedKeypoints[i]) prevPoints = np.array(prevPoints) currPoints = np.array(currPoints) # Find rigid matrix if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(prevPoints, 0)): H, inliesrs = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC) # Handle downscale if self.downscale > 1.0: H[0, 2] *= self.downscale H[1, 2] *= self.downscale else: print('Warning: not enough matching points') # Store to next iteration self.prevFrame = frame.copy() self.prevKeyPoints = copy.copy(keypoints) t1 = time.time() # gmc_line = str(1000 * (t1 - t0)) + "\t" + str(H[0, 0]) + "\t" + str(H[0, 1]) + "\t" + str( # H[0, 2]) + "\t" + str(H[1, 0]) + "\t" + str(H[1, 1]) + "\t" + str(H[1, 2]) + "\n" # self.gmc_file.write(gmc_line) return H def applyFile(self, raw_frame, detections=None): line = self.gmcFile.readline() tokens = line.split("\t") H = np.eye(2, 3, dtype=np.float_) H[0, 0] = float(tokens[1]) H[0, 1] = float(tokens[2]) H[0, 2] = float(tokens[3]) H[1, 0] = float(tokens[4]) H[1, 1] = float(tokens[5]) H[1, 2] = float(tokens[6]) return H ================================================ FILE: DLTA_AI_app/trackers/botsort/kalman_filter.py ================================================ # vim: expandtab:ts=4:sw=4 import numpy as np import scipy.linalg """ Table for the 0.95 quantile of the chi-square distribution with N degrees of freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv function and used as Mahalanobis gating threshold. """ chi2inv95 = { 1: 3.8415, 2: 5.9915, 3: 7.8147, 4: 9.4877, 5: 11.070, 6: 12.592, 7: 14.067, 8: 15.507, 9: 16.919} class KalmanFilter(object): """ A simple Kalman filter for tracking bounding boxes in image space. The 8-dimensional state space x, y, w, h, vx, vy, vw, vh contains the bounding box center position (x, y), width w, height h, and their respective velocities. Object motion follows a constant velocity model. The bounding box location (x, y, w, h) is taken as direct observation of the state space (linear observation model). """ def __init__(self): ndim, dt = 4, 1. # Create Kalman filter model matrices. 
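        # Editor's note (added): the two matrices built below encode a
        # constant-velocity model. `_motion_mat` is the 8x8 transition matrix
        # F = [[I, dt*I], [0, I]], so positions advance by their velocities at
        # each step; `_update_mat` is the 4x8 observation matrix H = [I, 0],
        # which reads (x, y, w, h) directly off the state vector.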
self._motion_mat = np.eye(2 * ndim, 2 * ndim) for i in range(ndim): self._motion_mat[i, ndim + i] = dt self._update_mat = np.eye(ndim, 2 * ndim) # Motion and observation uncertainty are chosen relative to the current # state estimate. These weights control the amount of uncertainty in # the model. This is a bit hacky. self._std_weight_position = 1. / 20 self._std_weight_velocity = 1. / 160 def initiate(self, measurement): """Create track from unassociated measurement. Parameters ---------- measurement : ndarray Bounding box coordinates (x, y, w, h) with center position (x, y), width w, and height h. Returns ------- (ndarray, ndarray) Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional) of the new track. Unobserved velocities are initialized to 0 mean. """ mean_pos = measurement mean_vel = np.zeros_like(mean_pos) mean = np.r_[mean_pos, mean_vel] std = [ 2 * self._std_weight_position * measurement[2], 2 * self._std_weight_position * measurement[3], 2 * self._std_weight_position * measurement[2], 2 * self._std_weight_position * measurement[3], 10 * self._std_weight_velocity * measurement[2], 10 * self._std_weight_velocity * measurement[3], 10 * self._std_weight_velocity * measurement[2], 10 * self._std_weight_velocity * measurement[3]] covariance = np.diag(np.square(std)) return mean, covariance def predict(self, mean, covariance): """Run Kalman filter prediction step. Parameters ---------- mean : ndarray The 8 dimensional mean vector of the object state at the previous time step. covariance : ndarray The 8x8 dimensional covariance matrix of the object state at the previous time step. Returns ------- (ndarray, ndarray) Returns the mean vector and covariance matrix of the predicted state. Unobserved velocities are initialized to 0 mean. """ std_pos = [ self._std_weight_position * mean[2], self._std_weight_position * mean[3], self._std_weight_position * mean[2], self._std_weight_position * mean[3]] std_vel = [ self._std_weight_velocity * mean[2], self._std_weight_velocity * mean[3], self._std_weight_velocity * mean[2], self._std_weight_velocity * mean[3]] motion_cov = np.diag(np.square(np.r_[std_pos, std_vel])) mean = np.dot(mean, self._motion_mat.T) covariance = np.linalg.multi_dot(( self._motion_mat, covariance, self._motion_mat.T)) + motion_cov return mean, covariance def project(self, mean, covariance): """Project state distribution to measurement space. Parameters ---------- mean : ndarray The state's mean vector (8 dimensional array). covariance : ndarray The state's covariance matrix (8x8 dimensional). Returns ------- (ndarray, ndarray) Returns the projected mean and covariance matrix of the given state estimate. """ std = [ self._std_weight_position * mean[2], self._std_weight_position * mean[3], self._std_weight_position * mean[2], self._std_weight_position * mean[3]] innovation_cov = np.diag(np.square(std)) mean = np.dot(self._update_mat, mean) covariance = np.linalg.multi_dot(( self._update_mat, covariance, self._update_mat.T)) return mean, covariance + innovation_cov def multi_predict(self, mean, covariance): """Run Kalman filter prediction step (Vectorized version). Parameters ---------- mean : ndarray The Nx8 dimensional mean matrix of the object states at the previous time step. covariance : ndarray The Nx8x8 dimensional covariance matrics of the object states at the previous time step. Returns ------- (ndarray, ndarray) Returns the mean vector and covariance matrix of the predicted state. Unobserved velocities are initialized to 0 mean. 
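        Example
        -------
        Illustrative sketch added in this edit (shapes and values are
        arbitrary, not taken from the original source)::

            kf = KalmanFilter()
            mean, cov = kf.initiate(np.array([320., 240., 50., 100.]))
            means = np.stack([mean, mean])   # (2, 8)
            covs = np.stack([cov, cov])      # (2, 8, 8)
            means, covs = kf.multi_predict(means, covs)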
""" std_pos = [ self._std_weight_position * mean[:, 2], self._std_weight_position * mean[:, 3], self._std_weight_position * mean[:, 2], self._std_weight_position * mean[:, 3]] std_vel = [ self._std_weight_velocity * mean[:, 2], self._std_weight_velocity * mean[:, 3], self._std_weight_velocity * mean[:, 2], self._std_weight_velocity * mean[:, 3]] sqr = np.square(np.r_[std_pos, std_vel]).T motion_cov = [] for i in range(len(mean)): motion_cov.append(np.diag(sqr[i])) motion_cov = np.asarray(motion_cov) mean = np.dot(mean, self._motion_mat.T) left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2)) covariance = np.dot(left, self._motion_mat.T) + motion_cov return mean, covariance def update(self, mean, covariance, measurement): """Run Kalman filter correction step. Parameters ---------- mean : ndarray The predicted state's mean vector (8 dimensional). covariance : ndarray The state's covariance matrix (8x8 dimensional). measurement : ndarray The 4 dimensional measurement vector (x, y, w, h), where (x, y) is the center position, w the width, and h the height of the bounding box. Returns ------- (ndarray, ndarray) Returns the measurement-corrected state distribution. """ projected_mean, projected_cov = self.project(mean, covariance) chol_factor, lower = scipy.linalg.cho_factor( projected_cov, lower=True, check_finite=False) kalman_gain = scipy.linalg.cho_solve( (chol_factor, lower), np.dot(covariance, self._update_mat.T).T, check_finite=False).T innovation = measurement - projected_mean new_mean = mean + np.dot(innovation, kalman_gain.T) new_covariance = covariance - np.linalg.multi_dot(( kalman_gain, projected_cov, kalman_gain.T)) return new_mean, new_covariance def gating_distance(self, mean, covariance, measurements, only_position=False, metric='maha'): """Compute gating distance between state distribution and measurements. A suitable distance threshold can be obtained from `chi2inv95`. If `only_position` is False, the chi-square distribution has 4 degrees of freedom, otherwise 2. Parameters ---------- mean : ndarray Mean vector over the state distribution (8 dimensional). covariance : ndarray Covariance of the state distribution (8x8 dimensional). measurements : ndarray An Nx4 dimensional matrix of N measurements, each in format (x, y, a, h) where (x, y) is the bounding box center position, a the aspect ratio, and h the height. only_position : Optional[bool] If True, distance computation is done with respect to the bounding box center position only. Returns ------- ndarray Returns an array of length N, where the i-th element contains the squared Mahalanobis distance between (mean, covariance) and `measurements[i]`. 
""" mean, covariance = self.project(mean, covariance) if only_position: mean, covariance = mean[:2], covariance[:2, :2] measurements = measurements[:, :2] d = measurements - mean if metric == 'gaussian': return np.sum(d * d, axis=1) elif metric == 'maha': cholesky_factor = np.linalg.cholesky(covariance) z = scipy.linalg.solve_triangular( cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True) squared_maha = np.sum(z * z, axis=0) return squared_maha else: raise ValueError('invalid distance metric') ================================================ FILE: DLTA_AI_app/trackers/botsort/matching.py ================================================ import numpy as np import scipy import lap from scipy.spatial.distance import cdist from trackers.botsort import kalman_filter def merge_matches(m1, m2, shape): O,P,Q = shape m1 = np.asarray(m1) m2 = np.asarray(m2) M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P)) M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q)) mask = M1*M2 match = mask.nonzero() match = list(zip(match[0], match[1])) unmatched_O = tuple(set(range(O)) - set([i for i, j in match])) unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match])) return match, unmatched_O, unmatched_Q def _indices_to_matches(cost_matrix, indices, thresh): matched_cost = cost_matrix[tuple(zip(*indices))] matched_mask = (matched_cost <= thresh) matches = indices[matched_mask] unmatched_a = tuple(set(range(cost_matrix.shape[0])) - set(matches[:, 0])) unmatched_b = tuple(set(range(cost_matrix.shape[1])) - set(matches[:, 1])) return matches, unmatched_a, unmatched_b def linear_assignment(cost_matrix, thresh): if cost_matrix.size == 0: return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1])) matches, unmatched_a, unmatched_b = [], [], [] cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh) for ix, mx in enumerate(x): if mx >= 0: matches.append([ix, mx]) unmatched_a = np.where(x < 0)[0] unmatched_b = np.where(y < 0)[0] matches = np.asarray(matches) return matches, unmatched_a, unmatched_b def ious(atlbrs, btlbrs): """ Compute cost based on IoU :type atlbrs: list[tlbr] | np.ndarray :type atlbrs: list[tlbr] | np.ndarray :rtype ious np.ndarray """ ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32) if ious.size == 0: return ious ious = bbox_ious( np.ascontiguousarray(atlbrs, dtype=np.float32), np.ascontiguousarray(btlbrs, dtype=np.float32) ) return ious def tlbr_expand(tlbr, scale=1.2): w = tlbr[2] - tlbr[0] h = tlbr[3] - tlbr[1] half_scale = 0.5 * scale tlbr[0] -= half_scale * w tlbr[1] -= half_scale * h tlbr[2] += half_scale * w tlbr[3] += half_scale * h return tlbr def iou_distance(atracks, btracks): """ Compute cost based on IoU :type atracks: list[STrack] :type btracks: list[STrack] :rtype cost_matrix np.ndarray """ if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)): atlbrs = atracks btlbrs = btracks else: atlbrs = [track.tlbr for track in atracks] btlbrs = [track.tlbr for track in btracks] _ious = ious(atlbrs, btlbrs) cost_matrix = 1 - _ious return cost_matrix def v_iou_distance(atracks, btracks): """ Compute cost based on IoU :type atracks: list[STrack] :type btracks: list[STrack] :rtype cost_matrix np.ndarray """ if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)): atlbrs = atracks btlbrs = btracks else: atlbrs 
= [track.tlwh_to_tlbr(track.pred_bbox) for track in atracks] btlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in btracks] _ious = ious(atlbrs, btlbrs) cost_matrix = 1 - _ious return cost_matrix def embedding_distance(tracks, detections, metric='cosine'): """ :param tracks: list[STrack] :param detections: list[BaseTrack] :param metric: :return: cost_matrix np.ndarray """ cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32) if cost_matrix.size == 0: return cost_matrix det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float32) track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float32) cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric)) # / 2.0 # Nomalized features return cost_matrix def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=False): if cost_matrix.size == 0: return cost_matrix gating_dim = 2 if only_position else 4 gating_threshold = kalman_filter.chi2inv95[gating_dim] # measurements = np.asarray([det.to_xyah() for det in detections]) measurements = np.asarray([det.to_xywh() for det in detections]) for row, track in enumerate(tracks): gating_distance = kf.gating_distance( track.mean, track.covariance, measurements, only_position) cost_matrix[row, gating_distance > gating_threshold] = np.inf return cost_matrix def fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda_=0.98): if cost_matrix.size == 0: return cost_matrix gating_dim = 2 if only_position else 4 gating_threshold = kalman_filter.chi2inv95[gating_dim] # measurements = np.asarray([det.to_xyah() for det in detections]) measurements = np.asarray([det.to_xywh() for det in detections]) for row, track in enumerate(tracks): gating_distance = kf.gating_distance( track.mean, track.covariance, measurements, only_position, metric='maha') cost_matrix[row, gating_distance > gating_threshold] = np.inf cost_matrix[row] = lambda_ * cost_matrix[row] + (1 - lambda_) * gating_distance return cost_matrix def fuse_iou(cost_matrix, tracks, detections): if cost_matrix.size == 0: return cost_matrix reid_sim = 1 - cost_matrix iou_dist = iou_distance(tracks, detections) iou_sim = 1 - iou_dist fuse_sim = reid_sim * (1 + iou_sim) / 2 det_scores = np.array([det.score for det in detections]) det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0) #fuse_sim = fuse_sim * (1 + det_scores) / 2 fuse_cost = 1 - fuse_sim return fuse_cost def fuse_score(cost_matrix, detections): if cost_matrix.size == 0: return cost_matrix iou_sim = 1 - cost_matrix det_scores = np.array([det.score for det in detections]) det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0) fuse_sim = iou_sim * det_scores fuse_cost = 1 - fuse_sim return fuse_cost def bbox_ious(boxes, query_boxes): """ Parameters ---------- boxes: (N, 4) ndarray of float query_boxes: (K, 4) ndarray of float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=np.float32) for k in range(K): box_area = ( (query_boxes[k, 2] - query_boxes[k, 0] + 1) * (query_boxes[k, 3] - query_boxes[k, 1] + 1) ) for n in range(N): iw = ( min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + 1 ) if iw > 0: ih = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + 1 ) if ih > 0: ua = float( (boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1) + box_area - iw * 
ih ) overlaps[n, k] = iw * ih / ua return overlaps ================================================ FILE: DLTA_AI_app/trackers/botsort/reid_multibackend.py ================================================ import torch.nn as nn import torch from pathlib import Path import numpy as np from itertools import islice import torchvision.transforms as transforms import cv2 import sys import torchvision.transforms as T from collections import OrderedDict, namedtuple import gdown from os.path import exists as file_exists from ultralytics.yolo.utils.checks import check_requirements, check_version from ultralytics.yolo.utils import LOGGER from trackers.strongsort.deep.reid_model_factory import (show_downloadeable_models, get_model_url, get_model_name, download_url, load_pretrained_weights) from trackers.strongsort.deep.models import build_model def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): # Check file(s) for acceptable suffix if file and suffix: if isinstance(suffix, str): suffix = [suffix] for f in file if isinstance(file, (list, tuple)) else [file]: s = Path(f).suffix.lower() # file suffix if len(s): assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" class ReIDDetectMultiBackend(nn.Module): # ReID models MultiBackend class for python inference on various backends def __init__(self, weights='osnet_x0_25_msmt17.pt', device=torch.device('cpu'), fp16=False): super().__init__() w = weights[0] if isinstance(weights, list) else weights self.pt, self.jit, self.onnx, self.xml, self.engine, self.tflite = self.model_type(w) # get backend self.fp16 = fp16 self.fp16 &= self.pt or self.jit or self.engine # FP16 # Build transform functions self.device = device self.image_size=(256, 128) self.pixel_mean=[0.485, 0.456, 0.406] self.pixel_std=[0.229, 0.224, 0.225] self.transforms = [] self.transforms += [T.Resize(self.image_size)] self.transforms += [T.ToTensor()] self.transforms += [T.Normalize(mean=self.pixel_mean, std=self.pixel_std)] self.preprocess = T.Compose(self.transforms) self.to_pil = T.ToPILImage() model_name = get_model_name(w) if w.suffix == '.pt': model_url = get_model_url(w) if not file_exists(w) and model_url is not None: gdown.download(model_url, str(w), quiet=False) elif file_exists(w): pass else: print(f'No URL associated to the chosen StrongSORT weights ({w}). 
Choose between:') show_downloadeable_models() exit() # Build model self.model = build_model( model_name, num_classes=1, pretrained=not (w and w.is_file()), use_gpu=device ) if self.pt: # PyTorch # populate model arch with weights if w and w.is_file() and w.suffix == '.pt': load_pretrained_weights(self.model, w) self.model.to(device).eval() self.model.half() if self.fp16 else self.model.float() elif self.jit: LOGGER.info(f'Loading {w} for TorchScript inference...') self.model = torch.jit.load(w) self.model.half() if self.fp16 else self.model.float() elif self.onnx: # ONNX Runtime LOGGER.info(f'Loading {w} for ONNX Runtime inference...') cuda = torch.cuda.is_available() and device.type != 'cpu' #check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) import onnxruntime providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] self.session = onnxruntime.InferenceSession(str(w), providers=providers) elif self.engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 if device.type == 'cpu': device = torch.device('cuda:0') Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: self.model_ = runtime.deserialize_cuda_engine(f.read()) self.context = self.model_.create_execution_context() self.bindings = OrderedDict() self.fp16 = False # default updated below dynamic = False for index in range(self.model_.num_bindings): name = self.model_.get_binding_name(index) dtype = trt.nptype(self.model_.get_binding_dtype(index)) if self.model_.binding_is_input(index): if -1 in tuple(self.model_.get_binding_shape(index)): # dynamic dynamic = True self.context.set_binding_shape(index, tuple(self.model_.get_profile_shape(0, index)[2])) if dtype == np.float16: self.fp16 = True shape = tuple(self.context.get_binding_shape(index)) im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) self.bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) self.binding_addrs = OrderedDict((n, d.ptr) for n, d in self.bindings.items()) batch_size = self.bindings['images'].shape[0] # if dynamic, this is instead max batch size elif self.xml: # OpenVINO LOGGER.info(f'Loading {w} for OpenVINO inference...') check_requirements(('openvino',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ from openvino.runtime import Core, Layout, get_batch ie = Core() if not Path(w).is_file(): # if not *.xml w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) if network.get_parameters()[0].get_layout().empty: network.get_parameters()[0].set_layout(Layout("NCWH")) batch_dim = get_batch(network) if batch_dim.is_static: batch_size = batch_dim.get_length() self.executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 self.output_layer = next(iter(self.executable_network.outputs)) elif self.tflite: LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu from tflite_runtime.interpreter import Interpreter, load_delegate except ImportError: import tensorflow as tf Interpreter, load_delegate = tf.lite.Interpreter, 
tf.lite.experimental.load_delegate, self.interpreter = tf.lite.Interpreter(model_path=w) self.interpreter.allocate_tensors() # Get input and output tensors. self.input_details = self.interpreter.get_input_details() self.output_details = self.interpreter.get_output_details() # Test model on random input data. input_data = np.array(np.random.random_sample((1,256,128,3)), dtype=np.float32) self.interpreter.set_tensor(self.input_details[0]['index'], input_data) self.interpreter.invoke() # The function `get_tensor()` returns a copy of the tensor data. output_data = self.interpreter.get_tensor(self.output_details[0]['index']) else: print('This model framework is not supported yet!') exit() @staticmethod def model_type(p='path/to/model.pt'): # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx from trackers.reid_export import export_formats sf = list(export_formats().Suffix) # export suffixes check_suffix(p, sf) # checks types = [s in Path(p).name for s in sf] return types def _preprocess(self, im_batch): images = [] for element in im_batch: image = self.to_pil(element) image = self.preprocess(image) images.append(image) images = torch.stack(images, dim=0) images = images.to(self.device) return images def forward(self, im_batch): # preprocess batch im_batch = self._preprocess(im_batch) # batch to half if self.fp16 and im_batch.dtype != torch.float16: im_batch = im_batch.half() # batch processing features = [] if self.pt: features = self.model(im_batch) elif self.jit: # TorchScript features = self.model(im_batch) elif self.onnx: # ONNX Runtime im_batch = im_batch.cpu().numpy() # torch to numpy features = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im_batch})[0] elif self.engine: # TensorRT if True and im_batch.shape != self.bindings['images'].shape: i_in, i_out = (self.model_.get_binding_index(x) for x in ('images', 'output')) self.context.set_binding_shape(i_in, im_batch.shape) # reshape if dynamic self.bindings['images'] = self.bindings['images']._replace(shape=im_batch.shape) self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out))) s = self.bindings['images'].shape assert im_batch.shape == s, f"input size {im_batch.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}" self.binding_addrs['images'] = int(im_batch.data_ptr()) self.context.execute_v2(list(self.binding_addrs.values())) features = self.bindings['output'].data elif self.xml: # OpenVINO im_batch = im_batch.cpu().numpy() # FP32 features = self.executable_network([im_batch])[self.output_layer] else: print('Framework not supported at the moment, we are working on it...') exit() if isinstance(features, (list, tuple)): return self.from_numpy(features[0]) if len(features) == 1 else [self.from_numpy(x) for x in features] else: return self.from_numpy(features) def from_numpy(self, x): return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x def warmup(self, imgsz=[(256, 128, 3)]): # Warmup model by running inference once warmup_types = self.pt, self.jit, self.onnx, self.engine, self.tflite if any(warmup_types) and self.device.type != 'cpu': im = [np.empty(*imgsz).astype(np.uint8)] # input for _ in range(2 if self.jit else 1): # self.forward(im) # warmup ================================================ FILE: DLTA_AI_app/trackers/bytetrack/basetrack.py ================================================ import numpy as np from collections import OrderedDict class TrackState(object): New = 0 Tracked = 1 Lost = 2 
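    # Editor's note (added): a track becomes Lost when it goes unmatched in a
    # frame; once it stays Lost for longer than the tracker's buffer it is
    # moved to the terminal state below (see BYTETracker.update, Step 5).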
Removed = 3 class BaseTrack(object): _count = 0 track_id = 0 is_activated = False state = TrackState.New history = OrderedDict() features = [] curr_feature = None score = 0 start_frame = 0 frame_id = 0 time_since_update = 0 # multi-camera location = (np.inf, np.inf) @property def end_frame(self): return self.frame_id @staticmethod def next_id(): BaseTrack._count += 1 return BaseTrack._count def activate(self, *args): raise NotImplementedError def predict(self): raise NotImplementedError def update(self, *args, **kwargs): raise NotImplementedError def mark_lost(self): self.state = TrackState.Lost def mark_removed(self): self.state = TrackState.Removed ================================================ FILE: DLTA_AI_app/trackers/bytetrack/byte_tracker.py ================================================ import numpy as np from collections import deque import os import os.path as osp import copy import torch import torch.nn.functional as F from ultralytics.yolo.utils.ops import xywh2xyxy, xyxy2xywh from trackers.bytetrack.kalman_filter import KalmanFilter from trackers.bytetrack import matching from trackers.bytetrack.basetrack import BaseTrack, TrackState class STrack(BaseTrack): shared_kalman = KalmanFilter() def __init__(self, tlwh, score, cls): # wait activate self._tlwh = np.asarray(tlwh, dtype=np.float32) self.kalman_filter = None self.mean, self.covariance = None, None self.is_activated = False self.score = score self.tracklet_len = 0 self.cls = cls def predict(self): mean_state = self.mean.copy() if self.state != TrackState.Tracked: mean_state[7] = 0 self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance) @staticmethod def multi_predict(stracks): if len(stracks) > 0: multi_mean = np.asarray([st.mean.copy() for st in stracks]) multi_covariance = np.asarray([st.covariance for st in stracks]) for i, st in enumerate(stracks): if st.state != TrackState.Tracked: multi_mean[i][7] = 0 multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance) for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)): stracks[i].mean = mean stracks[i].covariance = cov def activate(self, kalman_filter, frame_id): """Start a new tracklet""" self.kalman_filter = kalman_filter self.track_id = self.next_id() self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh)) self.tracklet_len = 0 self.state = TrackState.Tracked if frame_id == 1: self.is_activated = True # self.is_activated = True self.frame_id = frame_id self.start_frame = frame_id def re_activate(self, new_track, frame_id, new_id=False): self.mean, self.covariance = self.kalman_filter.update( self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh) ) self.tracklet_len = 0 self.state = TrackState.Tracked self.is_activated = True self.frame_id = frame_id if new_id: self.track_id = self.next_id() self.score = new_track.score self.cls = new_track.cls def update(self, new_track, frame_id): """ Update a matched track :type new_track: STrack :type frame_id: int :type update_feature: bool :return: """ self.frame_id = frame_id self.tracklet_len += 1 # self.cls = cls new_tlwh = new_track.tlwh self.mean, self.covariance = self.kalman_filter.update( self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh)) self.state = TrackState.Tracked self.is_activated = True self.score = new_track.score @property # @jit(nopython=True) def tlwh(self): """Get current position in bounding box format `(top left x, top left y, width, height)`. 
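        Worked example (added in this edit): for a state mean beginning
        (cx, cy, a, h) = (100., 100., 0.5, 40.), the conversion below yields
        (90., 80., 20., 40.): width = a * h = 20, and the top-left corner is
        the center minus half the box size.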
""" if self.mean is None: return self._tlwh.copy() ret = self.mean[:4].copy() ret[2] *= ret[3] ret[:2] -= ret[2:] / 2 return ret @property # @jit(nopython=True) def tlbr(self): """Convert bounding box to format `(min x, min y, max x, max y)`, i.e., `(top left, bottom right)`. """ ret = self.tlwh.copy() ret[2:] += ret[:2] return ret @staticmethod # @jit(nopython=True) def tlwh_to_xyah(tlwh): """Convert bounding box to format `(center x, center y, aspect ratio, height)`, where the aspect ratio is `width / height`. """ ret = np.asarray(tlwh).copy() ret[:2] += ret[2:] / 2 ret[2] /= ret[3] return ret def to_xyah(self): return self.tlwh_to_xyah(self.tlwh) @staticmethod # @jit(nopython=True) def tlbr_to_tlwh(tlbr): ret = np.asarray(tlbr).copy() ret[2:] -= ret[:2] return ret @staticmethod # @jit(nopython=True) def tlwh_to_tlbr(tlwh): ret = np.asarray(tlwh).copy() ret[2:] += ret[:2] return ret def __repr__(self): return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame) class BYTETracker(object): def __init__(self, track_thresh=0.45, match_thresh=0.8, track_buffer=25, frame_rate=30): self.tracked_stracks = [] # type: list[STrack] self.lost_stracks = [] # type: list[STrack] self.removed_stracks = [] # type: list[STrack] self.frame_id = 0 self.track_buffer=track_buffer self.track_thresh = track_thresh self.match_thresh = match_thresh self.det_thresh = track_thresh + 0.1 self.buffer_size = int(frame_rate / 30.0 * track_buffer) self.max_time_lost = self.buffer_size self.kalman_filter = KalmanFilter() def update(self, dets, _): self.frame_id += 1 activated_starcks = [] refind_stracks = [] lost_stracks = [] removed_stracks = [] xyxys = dets[:, 0:4] xywh = xyxy2xywh(xyxys.numpy()) confs = dets[:, 4] clss = dets[:, 5] classes = clss.numpy() xyxys = xyxys.numpy() confs = confs.numpy() remain_inds = confs > self.track_thresh inds_low = confs > 0.1 inds_high = confs < self.track_thresh inds_second = np.logical_and(inds_low, inds_high) dets_second = xywh[inds_second] dets = xywh[remain_inds] scores_keep = confs[remain_inds] scores_second = confs[inds_second] clss_keep = classes[remain_inds] clss_second = classes[inds_second] if len(dets) > 0: '''Detections''' detections = [STrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores_keep, clss_keep)] else: detections = [] ''' Add newly detected tracklets to tracked_stracks''' unconfirmed = [] tracked_stracks = [] # type: list[STrack] for track in self.tracked_stracks: if not track.is_activated: unconfirmed.append(track) else: tracked_stracks.append(track) ''' Step 2: First association, with high score detection boxes''' strack_pool = joint_stracks(tracked_stracks, self.lost_stracks) # Predict the current location with KF STrack.multi_predict(strack_pool) dists = matching.iou_distance(strack_pool, detections) #if not self.args.mot20: dists = matching.fuse_score(dists, detections) matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.match_thresh) for itracked, idet in matches: track = strack_pool[itracked] det = detections[idet] if track.state == TrackState.Tracked: track.update(detections[idet], self.frame_id) activated_starcks.append(track) else: track.re_activate(det, self.frame_id, new_id=False) refind_stracks.append(track) ''' Step 3: Second association, with low score detection boxes''' # association the untrack to the low score detections if len(dets_second) > 0: '''Detections''' detections_second = [STrack(xywh, s, c) for (xywh, s, c) in zip(dets_second, scores_second, clss_second)] else: detections_second = [] 
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked] dists = matching.iou_distance(r_tracked_stracks, detections_second) matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5) for itracked, idet in matches: track = r_tracked_stracks[itracked] det = detections_second[idet] if track.state == TrackState.Tracked: track.update(det, self.frame_id) activated_starcks.append(track) else: track.re_activate(det, self.frame_id, new_id=False) refind_stracks.append(track) for it in u_track: track = r_tracked_stracks[it] if not track.state == TrackState.Lost: track.mark_lost() lost_stracks.append(track) '''Deal with unconfirmed tracks, usually tracks with only one beginning frame''' detections = [detections[i] for i in u_detection] dists = matching.iou_distance(unconfirmed, detections) #if not self.args.mot20: dists = matching.fuse_score(dists, detections) matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7) for itracked, idet in matches: unconfirmed[itracked].update(detections[idet], self.frame_id) activated_starcks.append(unconfirmed[itracked]) for it in u_unconfirmed: track = unconfirmed[it] track.mark_removed() removed_stracks.append(track) """ Step 4: Init new stracks""" for inew in u_detection: track = detections[inew] if track.score < self.det_thresh: continue track.activate(self.kalman_filter, self.frame_id) activated_starcks.append(track) """ Step 5: Update state""" for track in self.lost_stracks: if self.frame_id - track.end_frame > self.max_time_lost: track.mark_removed() removed_stracks.append(track) # print('Ramained match {} s'.format(t4-t3)) self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked] self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks) self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks) self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks) self.lost_stracks.extend(lost_stracks) self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks) self.removed_stracks.extend(removed_stracks) self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks) # get scores of lost tracks output_stracks = [track for track in self.tracked_stracks if track.is_activated] outputs = [] for t in output_stracks: output= [] tlwh = t.tlwh tid = t.track_id tlwh = np.expand_dims(tlwh, axis=0) xyxy = xywh2xyxy(tlwh) xyxy = np.squeeze(xyxy, axis=0) output.extend(xyxy) output.append(tid) output.append(t.cls) output.append(t.score) outputs.append(output) return outputs #track_id, class_id, conf def joint_stracks(tlista, tlistb): exists = {} res = [] for t in tlista: exists[t.track_id] = 1 res.append(t) for t in tlistb: tid = t.track_id if not exists.get(tid, 0): exists[tid] = 1 res.append(t) return res def sub_stracks(tlista, tlistb): stracks = {} for t in tlista: stracks[t.track_id] = t for t in tlistb: tid = t.track_id if stracks.get(tid, 0): del stracks[tid] return list(stracks.values()) def remove_duplicate_stracks(stracksa, stracksb): pdist = matching.iou_distance(stracksa, stracksb) pairs = np.where(pdist < 0.15) dupa, dupb = list(), list() for p, q in zip(*pairs): timep = stracksa[p].frame_id - stracksa[p].start_frame timeq = stracksb[q].frame_id - stracksb[q].start_frame if timep > timeq: dupb.append(q) else: dupa.append(p) resa = [t for i, t in enumerate(stracksa) if not i in dupa] resb = [t for i, t in enumerate(stracksb) if not i in 
dupb] return resa, resb ================================================ FILE: DLTA_AI_app/trackers/bytetrack/configs/bytetrack.yaml ================================================ bytetrack: track_thresh: 0.6 # tracking confidence threshold track_buffer: 30 # the frames for keep lost tracks match_thresh: 0.8 # matching threshold for tracking frame_rate: 30 # FPS conf_thres: 0.5122620708221085 ================================================ FILE: DLTA_AI_app/trackers/bytetrack/kalman_filter.py ================================================ # vim: expandtab:ts=4:sw=4 import numpy as np import scipy.linalg """ Table for the 0.95 quantile of the chi-square distribution with N degrees of freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv function and used as Mahalanobis gating threshold. """ chi2inv95 = { 1: 3.8415, 2: 5.9915, 3: 7.8147, 4: 9.4877, 5: 11.070, 6: 12.592, 7: 14.067, 8: 15.507, 9: 16.919} class KalmanFilter(object): """ A simple Kalman filter for tracking bounding boxes in image space. The 8-dimensional state space x, y, a, h, vx, vy, va, vh contains the bounding box center position (x, y), aspect ratio a, height h, and their respective velocities. Object motion follows a constant velocity model. The bounding box location (x, y, a, h) is taken as direct observation of the state space (linear observation model). """ def __init__(self): ndim, dt = 4, 1. # Create Kalman filter model matrices. self._motion_mat = np.eye(2 * ndim, 2 * ndim) for i in range(ndim): self._motion_mat[i, ndim + i] = dt self._update_mat = np.eye(ndim, 2 * ndim) # Motion and observation uncertainty are chosen relative to the current # state estimate. These weights control the amount of uncertainty in # the model. This is a bit hacky. self._std_weight_position = 1. / 20 self._std_weight_velocity = 1. / 160 def initiate(self, measurement): """Create track from unassociated measurement. Parameters ---------- measurement : ndarray Bounding box coordinates (x, y, a, h) with center position (x, y), aspect ratio a, and height h. Returns ------- (ndarray, ndarray) Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional) of the new track. Unobserved velocities are initialized to 0 mean. """ mean_pos = measurement mean_vel = np.zeros_like(mean_pos) mean = np.r_[mean_pos, mean_vel] std = [ 2 * self._std_weight_position * measurement[3], 2 * self._std_weight_position * measurement[3], 1e-2, 2 * self._std_weight_position * measurement[3], 10 * self._std_weight_velocity * measurement[3], 10 * self._std_weight_velocity * measurement[3], 1e-5, 10 * self._std_weight_velocity * measurement[3]] covariance = np.diag(np.square(std)) return mean, covariance def predict(self, mean, covariance): """Run Kalman filter prediction step. Parameters ---------- mean : ndarray The 8 dimensional mean vector of the object state at the previous time step. covariance : ndarray The 8x8 dimensional covariance matrix of the object state at the previous time step. Returns ------- (ndarray, ndarray) Returns the mean vector and covariance matrix of the predicted state. Unobserved velocities are initialized to 0 mean. 
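        Example
        -------
        Illustrative sketch added in this edit (values are arbitrary)::

            kf = KalmanFilter()
            mean, cov = kf.initiate(np.array([320., 240., 0.5, 100.]))
            mean, cov = kf.predict(mean, cov)   # one constant-velocity step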
""" std_pos = [ self._std_weight_position * mean[3], self._std_weight_position * mean[3], 1e-2, self._std_weight_position * mean[3]] std_vel = [ self._std_weight_velocity * mean[3], self._std_weight_velocity * mean[3], 1e-5, self._std_weight_velocity * mean[3]] motion_cov = np.diag(np.square(np.r_[std_pos, std_vel])) #mean = np.dot(self._motion_mat, mean) mean = np.dot(mean, self._motion_mat.T) covariance = np.linalg.multi_dot(( self._motion_mat, covariance, self._motion_mat.T)) + motion_cov return mean, covariance def project(self, mean, covariance): """Project state distribution to measurement space. Parameters ---------- mean : ndarray The state's mean vector (8 dimensional array). covariance : ndarray The state's covariance matrix (8x8 dimensional). Returns ------- (ndarray, ndarray) Returns the projected mean and covariance matrix of the given state estimate. """ std = [ self._std_weight_position * mean[3], self._std_weight_position * mean[3], 1e-1, self._std_weight_position * mean[3]] innovation_cov = np.diag(np.square(std)) mean = np.dot(self._update_mat, mean) covariance = np.linalg.multi_dot(( self._update_mat, covariance, self._update_mat.T)) return mean, covariance + innovation_cov def multi_predict(self, mean, covariance): """Run Kalman filter prediction step (Vectorized version). Parameters ---------- mean : ndarray The Nx8 dimensional mean matrix of the object states at the previous time step. covariance : ndarray The Nx8x8 dimensional covariance matrics of the object states at the previous time step. Returns ------- (ndarray, ndarray) Returns the mean vector and covariance matrix of the predicted state. Unobserved velocities are initialized to 0 mean. """ std_pos = [ self._std_weight_position * mean[:, 3], self._std_weight_position * mean[:, 3], 1e-2 * np.ones_like(mean[:, 3]), self._std_weight_position * mean[:, 3]] std_vel = [ self._std_weight_velocity * mean[:, 3], self._std_weight_velocity * mean[:, 3], 1e-5 * np.ones_like(mean[:, 3]), self._std_weight_velocity * mean[:, 3]] sqr = np.square(np.r_[std_pos, std_vel]).T motion_cov = [] for i in range(len(mean)): motion_cov.append(np.diag(sqr[i])) motion_cov = np.asarray(motion_cov) mean = np.dot(mean, self._motion_mat.T) left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2)) covariance = np.dot(left, self._motion_mat.T) + motion_cov return mean, covariance def update(self, mean, covariance, measurement): """Run Kalman filter correction step. Parameters ---------- mean : ndarray The predicted state's mean vector (8 dimensional). covariance : ndarray The state's covariance matrix (8x8 dimensional). measurement : ndarray The 4 dimensional measurement vector (x, y, a, h), where (x, y) is the center position, a the aspect ratio, and h the height of the bounding box. Returns ------- (ndarray, ndarray) Returns the measurement-corrected state distribution. """ projected_mean, projected_cov = self.project(mean, covariance) chol_factor, lower = scipy.linalg.cho_factor( projected_cov, lower=True, check_finite=False) kalman_gain = scipy.linalg.cho_solve( (chol_factor, lower), np.dot(covariance, self._update_mat.T).T, check_finite=False).T innovation = measurement - projected_mean new_mean = mean + np.dot(innovation, kalman_gain.T) new_covariance = covariance - np.linalg.multi_dot(( kalman_gain, projected_cov, kalman_gain.T)) return new_mean, new_covariance def gating_distance(self, mean, covariance, measurements, only_position=False, metric='maha'): """Compute gating distance between state distribution and measurements. 
A suitable distance threshold can be obtained from `chi2inv95`. If `only_position` is False, the chi-square distribution has 4 degrees of freedom, otherwise 2. Parameters ---------- mean : ndarray Mean vector over the state distribution (8 dimensional). covariance : ndarray Covariance of the state distribution (8x8 dimensional). measurements : ndarray An Nx4 dimensional matrix of N measurements, each in format (x, y, a, h) where (x, y) is the bounding box center position, a the aspect ratio, and h the height. only_position : Optional[bool] If True, distance computation is done with respect to the bounding box center position only. Returns ------- ndarray Returns an array of length N, where the i-th element contains the squared Mahalanobis distance between (mean, covariance) and `measurements[i]`. """ mean, covariance = self.project(mean, covariance) if only_position: mean, covariance = mean[:2], covariance[:2, :2] measurements = measurements[:, :2] d = measurements - mean if metric == 'gaussian': return np.sum(d * d, axis=1) elif metric == 'maha': cholesky_factor = np.linalg.cholesky(covariance) z = scipy.linalg.solve_triangular( cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True) squared_maha = np.sum(z * z, axis=0) return squared_maha else: raise ValueError('invalid distance metric') ================================================ FILE: DLTA_AI_app/trackers/bytetrack/matching.py ================================================ import cv2 import numpy as np import scipy import lap from scipy.spatial.distance import cdist from trackers.bytetrack import kalman_filter import time def merge_matches(m1, m2, shape): O,P,Q = shape m1 = np.asarray(m1) m2 = np.asarray(m2) M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P)) M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q)) mask = M1*M2 match = mask.nonzero() match = list(zip(match[0], match[1])) unmatched_O = tuple(set(range(O)) - set([i for i, j in match])) unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match])) return match, unmatched_O, unmatched_Q def _indices_to_matches(cost_matrix, indices, thresh): matched_cost = cost_matrix[tuple(zip(*indices))] matched_mask = (matched_cost <= thresh) matches = indices[matched_mask] unmatched_a = tuple(set(range(cost_matrix.shape[0])) - set(matches[:, 0])) unmatched_b = tuple(set(range(cost_matrix.shape[1])) - set(matches[:, 1])) return matches, unmatched_a, unmatched_b def linear_assignment(cost_matrix, thresh): if cost_matrix.size == 0: return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1])) matches, unmatched_a, unmatched_b = [], [], [] cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh) for ix, mx in enumerate(x): if mx >= 0: matches.append([ix, mx]) unmatched_a = np.where(x < 0)[0] unmatched_b = np.where(y < 0)[0] matches = np.asarray(matches) return matches, unmatched_a, unmatched_b def ious(atlbrs, btlbrs): """ Compute cost based on IoU :type atlbrs: list[tlbr] | np.ndarray :type atlbrs: list[tlbr] | np.ndarray :rtype ious np.ndarray """ ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32) if ious.size == 0: return ious ious = bbox_ious( np.ascontiguousarray(atlbrs, dtype=np.float32), np.ascontiguousarray(btlbrs, dtype=np.float32) ) return ious def iou_distance(atracks, btracks): """ Compute cost based on IoU :type atracks: list[STrack] :type btracks: list[STrack] :rtype cost_matrix np.ndarray """ if (len(atracks)>0 and 
isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)): atlbrs = atracks btlbrs = btracks else: atlbrs = [track.tlbr for track in atracks] btlbrs = [track.tlbr for track in btracks] _ious = ious(atlbrs, btlbrs) cost_matrix = 1 - _ious return cost_matrix def v_iou_distance(atracks, btracks): """ Compute cost based on IoU :type atracks: list[STrack] :type btracks: list[STrack] :rtype cost_matrix np.ndarray """ if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)): atlbrs = atracks btlbrs = btracks else: atlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in atracks] btlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in btracks] _ious = ious(atlbrs, btlbrs) cost_matrix = 1 - _ious return cost_matrix def embedding_distance(tracks, detections, metric='cosine'): """ :param tracks: list[STrack] :param detections: list[BaseTrack] :param metric: :return: cost_matrix np.ndarray """ cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32) if cost_matrix.size == 0: return cost_matrix det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float32) #for i, track in enumerate(tracks): #cost_matrix[i, :] = np.maximum(0.0, cdist(track.smooth_feat.reshape(1,-1), det_features, metric)) track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float32) cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric)) # Nomalized features return cost_matrix def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=False): if cost_matrix.size == 0: return cost_matrix gating_dim = 2 if only_position else 4 gating_threshold = kalman_filter.chi2inv95[gating_dim] measurements = np.asarray([det.to_xyah() for det in detections]) for row, track in enumerate(tracks): gating_distance = kf.gating_distance( track.mean, track.covariance, measurements, only_position) cost_matrix[row, gating_distance > gating_threshold] = np.inf return cost_matrix def fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda_=0.98): if cost_matrix.size == 0: return cost_matrix gating_dim = 2 if only_position else 4 gating_threshold = kalman_filter.chi2inv95[gating_dim] measurements = np.asarray([det.to_xyah() for det in detections]) for row, track in enumerate(tracks): gating_distance = kf.gating_distance( track.mean, track.covariance, measurements, only_position, metric='maha') cost_matrix[row, gating_distance > gating_threshold] = np.inf cost_matrix[row] = lambda_ * cost_matrix[row] + (1 - lambda_) * gating_distance return cost_matrix def fuse_iou(cost_matrix, tracks, detections): if cost_matrix.size == 0: return cost_matrix reid_sim = 1 - cost_matrix iou_dist = iou_distance(tracks, detections) iou_sim = 1 - iou_dist fuse_sim = reid_sim * (1 + iou_sim) / 2 det_scores = np.array([det.score for det in detections]) det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0) #fuse_sim = fuse_sim * (1 + det_scores) / 2 fuse_cost = 1 - fuse_sim return fuse_cost def fuse_score(cost_matrix, detections): if cost_matrix.size == 0: return cost_matrix iou_sim = 1 - cost_matrix det_scores = np.array([det.score for det in detections]) det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0) fuse_sim = iou_sim * det_scores fuse_cost = 1 - fuse_sim return fuse_cost def bbox_ious(boxes, query_boxes): """ Parameters ---------- boxes: (N, 4) ndarray of float query_boxes: (K, 4) ndarray of 
float Returns ------- overlaps: (N, K) ndarray of overlap between boxes and query_boxes """ N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=np.float32) for k in range(K): box_area = ( (query_boxes[k, 2] - query_boxes[k, 0] + 1) * (query_boxes[k, 3] - query_boxes[k, 1] + 1) ) for n in range(N): iw = ( min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + 1 ) if iw > 0: ih = ( min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + 1 ) if ih > 0: ua = float( (boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1) + box_area - iw * ih ) overlaps[n, k] = iw * ih / ua return overlaps ================================================ FILE: DLTA_AI_app/trackers/deepocsort/__init__.py ================================================ from . import args from . import ocsort ================================================ FILE: DLTA_AI_app/trackers/deepocsort/args.py ================================================ import argparse def make_parser(): parser = argparse.ArgumentParser("OC-SORT parameters") # distributed parser.add_argument("-b", "--batch-size", type=int, default=1, help="batch size") parser.add_argument("-d", "--devices", default=None, type=int, help="device for training") parser.add_argument("--local_rank", default=0, type=int, help="local rank for dist training") parser.add_argument("--num_machines", default=1, type=int, help="num of node for training") parser.add_argument("--machine_rank", default=0, type=int, help="node rank for multi-node training") parser.add_argument( "-f", "--exp_file", default=None, type=str, help="pls input your expriment description file", ) parser.add_argument( "--test", dest="test", default=False, action="store_true", help="Evaluating on test-dev set.", ) parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) # det args parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval") parser.add_argument("--conf", default=0.1, type=float, help="test conf") parser.add_argument("--nms", default=0.7, type=float, help="test nms threshold") parser.add_argument("--tsize", default=[800, 1440], nargs="+", type=int, help="test img size") parser.add_argument("--seed", default=None, type=int, help="eval seed") # tracking args parser.add_argument("--track_thresh", type=float, default=0.6, help="detection confidence threshold") parser.add_argument( "--iou_thresh", type=float, default=0.3, help="the iou threshold in Sort for matching", ) parser.add_argument("--min_hits", type=int, default=3, help="min hits to create track in SORT") parser.add_argument( "--inertia", type=float, default=0.2, help="the weight of VDC term in cost matrix", ) parser.add_argument( "--deltat", type=int, default=3, help="time step difference to estimate direction", ) parser.add_argument("--track_buffer", type=int, default=30, help="the frames for keep lost tracks") parser.add_argument( "--match_thresh", type=float, default=0.9, help="matching threshold for tracking", ) parser.add_argument( "--gt-type", type=str, default="_val_half", help="suffix to find the gt annotation", ) parser.add_argument("--public", action="store_true", help="use public detection") parser.add_argument("--asso", default="iou", help="similarity function: iou/giou/diou/ciou/ctdis") # for kitti/bdd100k inference with public detections parser.add_argument( "--raw_results_path", type=str, default="exps/permatrack_kitti_test/", help="path to the raw tracking results 
from other tracks", ) parser.add_argument("--out_path", type=str, help="path to save output results") parser.add_argument( "--hp", action="store_true", help="use head padding to add the missing objects during \ initializing the tracks (offline).", ) # for demo video parser.add_argument("--demo_type", default="image", help="demo type, eg. image, video and webcam") parser.add_argument("--path", default="./videos/demo.mp4", help="path to images or video") parser.add_argument("--camid", type=int, default=0, help="webcam demo camera id") parser.add_argument( "--save_result", action="store_true", help="whether to save the inference result of image/video", ) parser.add_argument( "--device", default="gpu", type=str, help="device to run our model, can either be cpu or gpu", ) return parser ================================================ FILE: DLTA_AI_app/trackers/deepocsort/association.py ================================================ import os import pdb import numpy as np from scipy.special import softmax def iou_batch(bboxes1, bboxes2): """ From SORT: Computes IOU between two bboxes in the form [x1,y1,x2,y2] """ bboxes2 = np.expand_dims(bboxes2, 0) bboxes1 = np.expand_dims(bboxes1, 1) xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0]) yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1]) xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2]) yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3]) w = np.maximum(0.0, xx2 - xx1) h = np.maximum(0.0, yy2 - yy1) wh = w * h o = wh / ( (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1]) + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) - wh ) return o def giou_batch(bboxes1, bboxes2): """ :param bbox_p: predict of bbox(N,4)(x1,y1,x2,y2) :param bbox_g: groundtruth of bbox(N,4)(x1,y1,x2,y2) :return: """ # for details should go to https://arxiv.org/pdf/1902.09630.pdf # ensure predict's bbox form bboxes2 = np.expand_dims(bboxes2, 0) bboxes1 = np.expand_dims(bboxes1, 1) xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0]) yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1]) xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2]) yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3]) w = np.maximum(0.0, xx2 - xx1) h = np.maximum(0.0, yy2 - yy1) wh = w * h iou = wh / ( (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1]) + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) - wh ) xxc1 = np.minimum(bboxes1[..., 0], bboxes2[..., 0]) yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1]) xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2]) yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3]) wc = xxc2 - xxc1 hc = yyc2 - yyc1 assert (wc > 0).all() and (hc > 0).all() area_enclose = wc * hc giou = iou - (area_enclose - wh) / area_enclose giou = (giou + 1.0) / 2.0 # resize from (-1,1) to (0,1) return giou def diou_batch(bboxes1, bboxes2): """ :param bbox_p: predict of bbox(N,4)(x1,y1,x2,y2) :param bbox_g: groundtruth of bbox(N,4)(x1,y1,x2,y2) :return: """ # for details should go to https://arxiv.org/pdf/1902.09630.pdf # ensure predict's bbox form bboxes2 = np.expand_dims(bboxes2, 0) bboxes1 = np.expand_dims(bboxes1, 1) # calculate the intersection box xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0]) yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1]) xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2]) yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3]) w = np.maximum(0.0, xx2 - xx1) h = np.maximum(0.0, yy2 - yy1) wh = w * h iou = wh / ( (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - 
bboxes1[..., 1]) + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) - wh ) centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0 centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0 centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0 centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0 inner_diag = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2 xxc1 = np.minimum(bboxes1[..., 0], bboxes2[..., 0]) yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1]) xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2]) yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3]) outer_diag = (xxc2 - xxc1) ** 2 + (yyc2 - yyc1) ** 2 diou = iou - inner_diag / outer_diag return (diou + 1) / 2.0 # resize from (-1,1) to (0,1) def ciou_batch(bboxes1, bboxes2): """ :param bbox_p: predicted bboxes, (N,4) in (x1,y1,x2,y2) form :param bbox_g: ground-truth bboxes, (K,4) in (x1,y1,x2,y2) form :return: (N,K) CIoU matrix, rescaled to (0,1) """ # for details, see https://arxiv.org/pdf/1902.09630.pdf # broadcast so that every (bboxes1, bboxes2) pair is compared bboxes2 = np.expand_dims(bboxes2, 0) bboxes1 = np.expand_dims(bboxes1, 1) # calculate the intersection box xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0]) yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1]) xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2]) yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3]) w = np.maximum(0.0, xx2 - xx1) h = np.maximum(0.0, yy2 - yy1) wh = w * h iou = wh / ( (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1]) + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) - wh ) centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0 centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0 centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0 centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0 inner_diag = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2 xxc1 = np.minimum(bboxes1[..., 0], bboxes2[..., 0]) yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1]) xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2]) yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3]) outer_diag = (xxc2 - xxc1) ** 2 + (yyc2 - yyc1) ** 2 w1 = bboxes1[..., 2] - bboxes1[..., 0] h1 = bboxes1[..., 3] - bboxes1[..., 1] w2 = bboxes2[..., 2] - bboxes2[..., 0] h2 = bboxes2[..., 3] - bboxes2[..., 1] # prevent division by zero: add a one-pixel shift h2 = h2 + 1.0 h1 = h1 + 1.0 arctan = np.arctan(w2 / h2) - np.arctan(w1 / h1) v = (4 / (np.pi**2)) * (arctan**2) S = 1 - iou alpha = v / (S + v) ciou = iou - inner_diag / outer_diag - alpha * v return (ciou + 1) / 2.0 # resize from (-1,1) to (0,1) def ct_dist(bboxes1, bboxes2): """ Measure the center distance between two sets of bounding boxes. This is a coarse implementation; we don't recommend using it alone for association, as it can be unstable and sensitive to frame rate and object speed.
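Illustrative shape note (added for clarity, not from the original source): like the other pairwise costs in this file, ct_dist maps an (N, 4) array of detections and a (K, 4) array of tracks to an (N, K) cost matrix, for example:

    import numpy as np
    dets = np.array([[0., 0., 10., 10.], [20., 20., 30., 30.]])                  # N = 2
    trks = np.array([[1., 1., 9., 9.], [50., 50., 60., 60.], [0., 0., 5., 5.]])  # K = 3
    assert iou_batch(dets, trks).shape == (2, 3)
    assert ct_dist(dets, trks).shape == (2, 3)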
""" bboxes2 = np.expand_dims(bboxes2, 0) bboxes1 = np.expand_dims(bboxes1, 1) centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0 centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0 centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0 centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0 ct_dist2 = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2 ct_dist = np.sqrt(ct_dist2) # The linear rescaling is a naive version and needs more study ct_dist = ct_dist / ct_dist.max() return ct_dist.max() - ct_dist # resize to (0,1) def speed_direction_batch(dets, tracks): tracks = tracks[..., np.newaxis] CX1, CY1 = (dets[:, 0] + dets[:, 2]) / 2.0, (dets[:, 1] + dets[:, 3]) / 2.0 CX2, CY2 = (tracks[:, 0] + tracks[:, 2]) / 2.0, (tracks[:, 1] + tracks[:, 3]) / 2.0 dx = CX1 - CX2 dy = CY1 - CY2 norm = np.sqrt(dx**2 + dy**2) + 1e-6 dx = dx / norm dy = dy / norm return dy, dx # size: num_track x num_det def linear_assignment(cost_matrix): try: import lap _, x, y = lap.lapjv(cost_matrix, extend_cost=True) return np.array([[y[i], i] for i in x if i >= 0]) # except ImportError: from scipy.optimize import linear_sum_assignment x, y = linear_sum_assignment(cost_matrix) return np.array(list(zip(x, y))) def associate_detections_to_trackers(detections, trackers, iou_threshold=0.3): """ Assigns detections to tracked object (both represented as bounding boxes) Returns 3 lists of matches, unmatched_detections and unmatched_trackers """ if len(trackers) == 0: return ( np.empty((0, 2), dtype=int), np.arange(len(detections)), np.empty((0, 5), dtype=int), ) iou_matrix = iou_batch(detections, trackers) if min(iou_matrix.shape) > 0: a = (iou_matrix > iou_threshold).astype(np.int32) if a.sum(1).max() == 1 and a.sum(0).max() == 1: matched_indices = np.stack(np.where(a), axis=1) else: matched_indices = linear_assignment(-iou_matrix) else: matched_indices = np.empty(shape=(0, 2)) unmatched_detections = [] for d, det in enumerate(detections): if d not in matched_indices[:, 0]: unmatched_detections.append(d) unmatched_trackers = [] for t, trk in enumerate(trackers): if t not in matched_indices[:, 1]: unmatched_trackers.append(t) # filter out matched with low IOU matches = [] for m in matched_indices: if iou_matrix[m[0], m[1]] < iou_threshold: unmatched_detections.append(m[0]) unmatched_trackers.append(m[1]) else: matches.append(m.reshape(1, 2)) if len(matches) == 0: matches = np.empty((0, 2), dtype=int) else: matches = np.concatenate(matches, axis=0) return matches, np.array(unmatched_detections), np.array(unmatched_trackers) def compute_aw_max_metric(emb_cost, w_association_emb, bottom=0.5): w_emb = np.full_like(emb_cost, w_association_emb) for idx in range(emb_cost.shape[0]): inds = np.argsort(-emb_cost[idx]) # If there's less than two matches, just keep original weight if len(inds) < 2: continue if emb_cost[idx, inds[0]] == 0: row_weight = 0 else: row_weight = 1 - max((emb_cost[idx, inds[1]] / emb_cost[idx, inds[0]]) - bottom, 0) / (1 - bottom) w_emb[idx] *= row_weight for idj in range(emb_cost.shape[1]): inds = np.argsort(-emb_cost[:, idj]) # If there's less than two matches, just keep original weight if len(inds) < 2: continue if emb_cost[inds[0], idj] == 0: col_weight = 0 else: col_weight = 1 - max((emb_cost[inds[1], idj] / emb_cost[inds[0], idj]) - bottom, 0) / (1 - bottom) w_emb[:, idj] *= col_weight return w_emb * emb_cost def associate( detections, trackers, iou_threshold, velocities, previous_obs, vdc_weight, emb_cost, w_assoc_emb, aw_off, aw_param ): if len(trackers) == 0: return ( np.empty((0, 2), dtype=int), 
np.arange(len(detections)), np.empty((0, 5), dtype=int), ) Y, X = speed_direction_batch(detections, previous_obs) inertia_Y, inertia_X = velocities[:, 0], velocities[:, 1] inertia_Y = np.repeat(inertia_Y[:, np.newaxis], Y.shape[1], axis=1) inertia_X = np.repeat(inertia_X[:, np.newaxis], X.shape[1], axis=1) diff_angle_cos = inertia_X * X + inertia_Y * Y diff_angle_cos = np.clip(diff_angle_cos, a_min=-1, a_max=1) diff_angle = np.arccos(diff_angle_cos) diff_angle = (np.pi / 2.0 - np.abs(diff_angle)) / np.pi valid_mask = np.ones(previous_obs.shape[0]) valid_mask[np.where(previous_obs[:, 4] < 0)] = 0 iou_matrix = iou_batch(detections, trackers) scores = np.repeat(detections[:, -1][:, np.newaxis], trackers.shape[0], axis=1) # iou_matrix = iou_matrix * scores # a trick that sometimes works; we don't encourage it valid_mask = np.repeat(valid_mask[:, np.newaxis], X.shape[1], axis=1) angle_diff_cost = (valid_mask * diff_angle) * vdc_weight angle_diff_cost = angle_diff_cost.T angle_diff_cost = angle_diff_cost * scores if min(iou_matrix.shape) > 0: a = (iou_matrix > iou_threshold).astype(np.int32) if a.sum(1).max() == 1 and a.sum(0).max() == 1: matched_indices = np.stack(np.where(a), axis=1) else: if emb_cost is None: emb_cost = 0 else: emb_cost = emb_cost.cpu().numpy() emb_cost[iou_matrix <= 0] = 0 if not aw_off: emb_cost = compute_aw_max_metric(emb_cost, w_assoc_emb, bottom=aw_param) else: emb_cost *= w_assoc_emb final_cost = -(iou_matrix + angle_diff_cost + emb_cost) matched_indices = linear_assignment(final_cost) else: matched_indices = np.empty(shape=(0, 2)) unmatched_detections = [] for d, det in enumerate(detections): if d not in matched_indices[:, 0]: unmatched_detections.append(d) unmatched_trackers = [] for t, trk in enumerate(trackers): if t not in matched_indices[:, 1]: unmatched_trackers.append(t) # filter out matched with low IOU matches = [] for m in matched_indices: if iou_matrix[m[0], m[1]] < iou_threshold: unmatched_detections.append(m[0]) unmatched_trackers.append(m[1]) else: matches.append(m.reshape(1, 2)) if len(matches) == 0: matches = np.empty((0, 2), dtype=int) else: matches = np.concatenate(matches, axis=0) return matches, np.array(unmatched_detections), np.array(unmatched_trackers) def associate_kitti(detections, trackers, det_cates, iou_threshold, velocities, previous_obs, vdc_weight): if len(trackers) == 0: return ( np.empty((0, 2), dtype=int), np.arange(len(detections)), np.empty((0, 5), dtype=int), ) """ Cost from the velocity direction consistency """ Y, X = speed_direction_batch(detections, previous_obs) inertia_Y, inertia_X = velocities[:, 0], velocities[:, 1] inertia_Y = np.repeat(inertia_Y[:, np.newaxis], Y.shape[1], axis=1) inertia_X = np.repeat(inertia_X[:, np.newaxis], X.shape[1], axis=1) diff_angle_cos = inertia_X * X + inertia_Y * Y diff_angle_cos = np.clip(diff_angle_cos, a_min=-1, a_max=1) diff_angle = np.arccos(diff_angle_cos) diff_angle = (np.pi / 2.0 - np.abs(diff_angle)) / np.pi valid_mask = np.ones(previous_obs.shape[0]) valid_mask[np.where(previous_obs[:, 4] < 0)] = 0 valid_mask = np.repeat(valid_mask[:, np.newaxis], X.shape[1], axis=1) scores = np.repeat(detections[:, -1][:, np.newaxis], trackers.shape[0], axis=1) angle_diff_cost = (valid_mask * diff_angle) * vdc_weight angle_diff_cost = angle_diff_cost.T angle_diff_cost = angle_diff_cost * scores """ Cost from IoU """ iou_matrix = iou_batch(detections, trackers) """ With multiple categories, generate the cost for category mismatch """ num_dets = detections.shape[0] num_trk = trackers.shape[0] cate_matrix =
np.zeros((num_dets, num_trk)) for i in range(num_dets): for j in range(num_trk): if det_cates[i] != trackers[j, 4]: cate_matrix[i][j] = -1e6 cost_matrix = -iou_matrix - angle_diff_cost - cate_matrix if min(iou_matrix.shape) > 0: a = (iou_matrix > iou_threshold).astype(np.int32) if a.sum(1).max() == 1 and a.sum(0).max() == 1: matched_indices = np.stack(np.where(a), axis=1) else: matched_indices = linear_assignment(cost_matrix) else: matched_indices = np.empty(shape=(0, 2)) unmatched_detections = [] for d, det in enumerate(detections): if d not in matched_indices[:, 0]: unmatched_detections.append(d) unmatched_trackers = [] for t, trk in enumerate(trackers): if t not in matched_indices[:, 1]: unmatched_trackers.append(t) # filter out matched with low IOU matches = [] for m in matched_indices: if iou_matrix[m[0], m[1]] < iou_threshold: unmatched_detections.append(m[0]) unmatched_trackers.append(m[1]) else: matches.append(m.reshape(1, 2)) if len(matches) == 0: matches = np.empty((0, 2), dtype=int) else: matches = np.concatenate(matches, axis=0) return matches, np.array(unmatched_detections), np.array(unmatched_trackers) ================================================ FILE: DLTA_AI_app/trackers/deepocsort/cmc.py ================================================ import pdb import pickle import os import cv2 import numpy as np class CMCComputer: def __init__(self, minimum_features=10, method="sparse"): assert method in ["file", "sparse", "sift"] os.makedirs("./cache", exist_ok=True) self.cache_path = "./cache/affine_ocsort.pkl" self.cache = {} if os.path.exists(self.cache_path): with open(self.cache_path, "rb") as fp: self.cache = pickle.load(fp) self.minimum_features = minimum_features self.prev_img = None self.prev_desc = None self.sparse_flow_param = dict( maxCorners=3000, qualityLevel=0.01, minDistance=1, blockSize=3, useHarrisDetector=False, k=0.04, ) self.file_computed = {} self.comp_function = None if method == "sparse": self.comp_function = self._affine_sparse_flow elif method == "sift": self.comp_function = self._affine_sift # Same BoT-SORT CMC arrays elif method == "file": self.comp_function = self._affine_file self.file_affines = {} # Maps from tag name to file name self.file_names = {} # All the ablation file names for f_name in os.listdir("./cache/cmc_files/MOT17_ablation/"): # The tag that'll be passed into compute_affine based on image name tag = f_name.replace("GMC-", "").replace(".txt", "") + "-FRCNN" f_name = os.path.join("./cache/cmc_files/MOT17_ablation/", f_name) self.file_names[tag] = f_name for f_name in os.listdir("./cache/cmc_files/MOT20_ablation/"): tag = f_name.replace("GMC-", "").replace(".txt", "") f_name = os.path.join("./cache/cmc_files/MOT20_ablation/", f_name) self.file_names[tag] = f_name # All the test file names for f_name in os.listdir("./cache/cmc_files/MOTChallenge/"): tag = f_name.replace("GMC-", "").replace(".txt", "") if "MOT17" in tag: tag = tag + "-FRCNN" # If it's an ablation one (not test) don't overwrite it if tag in self.file_names: continue f_name = os.path.join("./cache/cmc_files/MOTChallenge/", f_name) self.file_names[tag] = f_name def compute_affine(self, img, bbox, tag): img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if tag in self.cache: A = self.cache[tag] return A mask = np.ones_like(img, dtype=np.uint8) if bbox.shape[0] > 0: bbox = np.round(bbox).astype(np.int32) bbox[bbox < 0] = 0 for bb in bbox: mask[bb[1] : bb[3], bb[0] : bb[2]] = 0 A = self.comp_function(img, mask, tag) self.cache[tag] = A return A def _load_file(self, name): affines = [] 
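# Added note (inferred from the parsing below, not from the original source):
# each line of a GMC file is assumed to be tab-separated as
#   <frame> \t a11 \t a12 \t a13 \t a21 \t a22 \t a23 ...
# where tokens [1:7] fill the 2x3 affine [[a11, a12, a13], [a21, a22, a23]]
# row by row, and the leading frame token is skipped.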
with open(self.file_names[name], "r") as fp: for line in fp: tokens = [float(f) for f in line.split("\t")[1:7]] A = np.eye(2, 3) A[0, 0] = tokens[0] A[0, 1] = tokens[1] A[0, 2] = tokens[2] A[1, 0] = tokens[3] A[1, 1] = tokens[4] A[1, 2] = tokens[5] affines.append(A) self.file_affines[name] = affines def _affine_file(self, frame, mask, tag): name, num = tag.split(":") if name not in self.file_affines: self._load_file(name) if name not in self.file_affines: raise RuntimeError("Error loading file affines for CMC.") return self.file_affines[name][int(num) - 1] def _affine_sift(self, frame, mask, tag): A = np.eye(2, 3) detector = cv2.SIFT_create() kp, desc = detector.detectAndCompute(frame, mask) if self.prev_desc is None: self.prev_desc = [kp, desc] return A if desc.shape[0] < self.minimum_features or self.prev_desc[1].shape[0] < self.minimum_features: return A bf = cv2.BFMatcher(cv2.NORM_L2) matches = bf.knnMatch(self.prev_desc[1], desc, k=2) good = [] for m, n in matches: if m.distance < 0.7 * n.distance: good.append(m) if len(good) > self.minimum_features: src_pts = np.float32([self.prev_desc[0][m.queryIdx].pt for m in good]).reshape(-1, 1, 2) dst_pts = np.float32([kp[m.trainIdx].pt for m in good]).reshape(-1, 1, 2) A, _ = cv2.estimateAffinePartial2D(src_pts, dst_pts, method=cv2.RANSAC) else: print("Warning: not enough matching points") if A is None: A = np.eye(2, 3) self.prev_desc = [kp, desc] return A def _affine_sparse_flow(self, frame, mask, tag): # Initialize A = np.eye(2, 3) # find the keypoints keypoints = cv2.goodFeaturesToTrack(frame, mask=mask, **self.sparse_flow_param) # Handle first frame if self.prev_img is None: self.prev_img = frame self.prev_desc = keypoints return A matched_kp, status, err = cv2.calcOpticalFlowPyrLK(self.prev_img, frame, self.prev_desc, None) matched_kp = matched_kp.reshape(-1, 2) status = status.reshape(-1) prev_points = self.prev_desc.reshape(-1, 2) prev_points = prev_points[status] curr_points = matched_kp[status] # Find rigid matrix if prev_points.shape[0] > self.minimum_features: A, _ = cv2.estimateAffinePartial2D(prev_points, curr_points, method=cv2.RANSAC) else: print("Warning: not enough matching points") if A is None: A = np.eye(2, 3) self.prev_img = frame self.prev_desc = keypoints return A def dump_cache(self): with open(self.cache_path, "wb") as fp: pickle.dump(self.cache, fp) ================================================ FILE: DLTA_AI_app/trackers/deepocsort/configs/deepocsort.yaml ================================================ # Trial number: 137 # HOTA, MOTA, IDF1: [55.567] deepocsort: asso_func: giou conf_thres: 0.5122620708221085 delta_t: 1 det_thresh: 0 inertia: 0.3941737016672115 iou_thresh: 0.22136877277096445 max_age: 50 min_hits: 1 use_byte: false ================================================ FILE: DLTA_AI_app/trackers/deepocsort/embedding.py ================================================ import pdb from collections import OrderedDict import os import pickle import torch import cv2 import torchvision import numpy as np class EmbeddingComputer: def __init__(self, dataset): self.model = None self.dataset = dataset self.crop_size = (128, 384) os.makedirs("./cache/embeddings/", exist_ok=True) self.cache_path = "./cache/embeddings/{}_embedding.pkl" self.cache = {} self.cache_name = "" def load_cache(self, path): self.cache_name = path cache_path = self.cache_path.format(path) if os.path.exists(cache_path): with open(cache_path, "rb") as fp: self.cache = pickle.load(fp) def compute_embedding(self, img, bbox, tag, is_numpy=True): if 
self.cache_name != tag.split(":")[0]: self.load_cache(tag.split(":")[0]) if tag in self.cache: embs = self.cache[tag] if embs.shape[0] != bbox.shape[0]: raise RuntimeError( "ERROR: The number of cached embeddings don't match the " "number of detections.\nWas the detector model changed? Delete cache if so." ) return embs if self.model is None: self.initialize_model() # Make sure bbox is within image frame if is_numpy: h, w = img.shape[:2] else: h, w = img.shape[2:] results = np.round(bbox).astype(np.int32) results[:, 0] = results[:, 0].clip(0, w) results[:, 1] = results[:, 1].clip(0, h) results[:, 2] = results[:, 2].clip(0, w) results[:, 3] = results[:, 3].clip(0, h) # Generate all the crops crops = [] for p in results: if is_numpy: crop = img[p[1] : p[3], p[0] : p[2]] crop = cv2.cvtColor(crop, cv2.COLOR_BGR2RGB) crop = cv2.resize(crop, self.crop_size, interpolation=cv2.INTER_LINEAR) crop = torch.as_tensor(crop.astype("float32").transpose(2, 0, 1)) crop = crop.unsqueeze(0) else: crop = img[:, :, p[1] : p[3], p[0] : p[2]] crop = torchvision.transforms.functional.resize(crop, self.crop_size) crops.append(crop) crops = torch.cat(crops, dim=0) # Create embeddings and l2 normalize them with torch.no_grad(): crops = crops.cuda() crops = crops.half() embs = self.model(crops) embs = torch.nn.functional.normalize(embs) embs = embs.cpu().numpy() self.cache[tag] = embs return embs def initialize_model(self): """ model = torchreid.models.build_model(name="osnet_ain_x1_0", num_classes=2510, loss="softmax", pretrained=False) sd = torch.load("external/weights/osnet_ain_ms_d_c.pth.tar")["state_dict"] new_state_dict = OrderedDict() for k, v in sd.items(): name = k[7:] # remove `module.` new_state_dict[name] = v # load params model.load_state_dict(new_state_dict) model.eval() model.cuda() """ if self.dataset == "mot17": path = "external/weights/mot17_sbs_S50.pth" elif self.dataset == "mot20": path = "external/weights/mot20_sbs_S50.pth" elif self.dataset == "dance": path = None else: raise RuntimeError("Need the path for a new ReID model.") model = FastReID(path) model.eval() model.cuda() model.half() self.model = model def dump_cache(self): if self.cache_name: with open(self.cache_path.format(self.cache_name), "wb") as fp: pickle.dump(self.cache, fp) ================================================ FILE: DLTA_AI_app/trackers/deepocsort/kalmanfilter.py ================================================ # -*- coding: utf-8 -*- # pylint: disable=invalid-name, too-many-arguments, too-many-branches, # pylint: disable=too-many-locals, too-many-instance-attributes, too-many-lines """ This module implements the linear Kalman filter in both an object oriented and procedural form. The KalmanFilter class implements the filter by storing the various matrices in instance variables, minimizing the amount of bookkeeping you have to do. All Kalman filters operate with a predict->update cycle. The predict step, implemented with the method or function predict(), uses the state transition matrix F to predict the state in the next time period (epoch). The state is stored as a gaussian (x, P), where x is the state (column) vector, and P is its covariance. Covariance matrix Q specifies the process covariance. In Bayesian terms, this prediction is called the *prior*, which you can think of colloquially as the estimate prior to incorporating the measurement. The update step, implemented with the method or function `update()`, incorporates the measurement z with covariance R, into the state estimate (x, P). 
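In equation form (these match the comments in predict() and update() below), one epoch of the filter is

    Predict:  x = Fx + Bu
              P = FPF' + Q
    Update:   y = z - Hx
              S = HPH' + R
              K = PH' inv(S)
              x = x + Ky
              P = (I - KH)P(I - KH)' + KRK'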
The class stores the system uncertainty in S, the innovation (residual between prediction and measurement in measurement space) in y, and the Kalman gain in K. The procedural form returns these variables to you. In Bayesian terms this computes the *posterior* - the estimate after the information from the measurement is incorporated. Whether you use the OO form or procedural form is up to you. If matrices such as H, R, and F are changing each epoch, you'll probably opt to use the procedural form. If they are unchanging, the OO form is perhaps easier to use since you won't need to keep track of these matrices. This is especially useful if you are implementing banks of filters or comparing various KF designs for performance; a trivial coding bug could lead to using the wrong sets of matrices. This module also offers an implementation of the RTS smoother, and other helper functions, such as log likelihood computations. The Saver class allows you to easily save the state of the KalmanFilter class after every update. This module expects NumPy arrays for all values that expect arrays, although in a few cases, particularly method parameters, it will accept types that convert to NumPy arrays, such as lists of lists. These exceptions are documented in the method or function. Examples -------- The following example constructs a constant velocity kinematic filter, filters noisy data, and plots the results. It also demonstrates using the Saver class to save the state of the filter at each epoch. .. code-block:: Python import matplotlib.pyplot as plt import numpy as np from numpy.random import randn from filterpy.kalman import KalmanFilter from filterpy.common import Q_discrete_white_noise, Saver dt = 1. r_std, q_std = 2., 0.003 cv = KalmanFilter(dim_x=2, dim_z=1) cv.x = np.array([[0.], [1.]]) # position, velocity cv.F = np.array([[1, dt], [0, 1]]) cv.R = np.array([[r_std**2]]) cv.H = np.array([[1., 0.]]) cv.P = np.diag([.1**2, .03**2]) cv.Q = Q_discrete_white_noise(2, dt, q_std**2) saver = Saver(cv) for z in range(100): cv.predict() cv.update([z + randn() * r_std]) saver.save() # save the filter's state saver.to_array() plt.plot(saver.x[:, 0]) # plot all of the priors plt.plot(saver.x_prior[:, 0]) # plot mahalanobis distance plt.figure() plt.plot(saver.mahalanobis) This code implements the same filter using the procedural form x = np.array([[0.], [1.]]) # position, velocity F = np.array([[1, dt], [0, 1]]) R = np.array([[r_std**2]]) H = np.array([[1., 0.]]) P = np.diag([.1**2, .03**2]) Q = Q_discrete_white_noise(2, dt, q_std**2) xs = [] for z in range(100): x, P = predict(x, P, F=F, Q=Q) x, P = update(x, P, z=[z + randn() * r_std], R=R, H=H) xs.append(x[0, 0]) plt.plot(xs) For more examples see the test subdirectory, or refer to the book cited below. In it I both teach Kalman filtering from basic principles, and teach the use of this library in great detail. FilterPy library. http://github.com/rlabbe/filterpy Documentation at: https://filterpy.readthedocs.org Supporting book at: https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python This is licensed under an MIT license. See the readme.MD file for more information. Copyright 2014-2018 Roger R Labbe Jr. """ from __future__ import absolute_import, division import pdb from copy import deepcopy from math import log, exp, sqrt import sys import numpy as np from numpy import dot, zeros, eye, isscalar, shape import numpy.linalg as linalg from filterpy.stats import logpdf from filterpy.common import pretty_str, reshape_z class KalmanFilterNew(object): """Implements a Kalman filter.
You are responsible for setting the various state variables to reasonable values; the defaults will not give you a functional filter. For now the best documentation is my free book Kalman and Bayesian Filters in Python [2]_. The test files in this directory also give you a basic idea of use, albeit without much description. In brief, you will first construct this object, specifying the size of the state vector with dim_x and the size of the measurement vector that you will be using with dim_z. These are mostly used to perform size checks when you assign values to the various matrices. For example, if you specified dim_z=2 and then try to assign a 3x3 matrix to R (the measurement noise matrix you will get an assert exception because R should be 2x2. (If for whatever reason you need to alter the size of things midstream just use the underscore version of the matrices to assign directly: your_filter._R = a_3x3_matrix.) After construction the filter will have default matrices created for you, but you must specify the values for each. It’s usually easiest to just overwrite them rather than assign to each element yourself. This will be clearer in the example below. All are of type numpy.array. Examples -------- Here is a filter that tracks position and velocity using a sensor that only reads position. First construct the object with the required dimensionality. Here the state (`dim_x`) has 2 coefficients (position and velocity), and the measurement (`dim_z`) has one. In FilterPy `x` is the state, `z` is the measurement. .. code:: from filterpy.kalman import KalmanFilter f = KalmanFilter (dim_x=2, dim_z=1) Assign the initial value for the state (position and velocity). You can do this with a two dimensional array like so: .. code:: f.x = np.array([[2.], # position [0.]]) # velocity or just use a one dimensional array, which I prefer doing. .. code:: f.x = np.array([2., 0.]) Define the state transition matrix: .. code:: f.F = np.array([[1.,1.], [0.,1.]]) Define the measurement function. Here we need to convert a position-velocity vector into just a position vector, so we use: .. code:: f.H = np.array([[1., 0.]]) Define the state's covariance matrix P. .. code:: f.P = np.array([[1000., 0.], [ 0., 1000.] ]) Now assign the measurement noise. Here the dimension is 1x1, so I can use a scalar .. code:: f.R = 5 I could have done this instead: .. code:: f.R = np.array([[5.]]) Note that this must be a 2 dimensional array. Finally, I will assign the process noise. Here I will take advantage of another FilterPy library function: .. code:: from filterpy.common import Q_discrete_white_noise f.Q = Q_discrete_white_noise(dim=2, dt=0.1, var=0.13) Now just perform the standard predict/update loop: .. code:: while some_condition_is_true: z = get_sensor_reading() f.predict() f.update(z) do_something_with_estimate (f.x) **Procedural Form** This module also contains stand alone functions to perform Kalman filtering. Use these if you are not a fan of objects. **Example** .. code:: while True: z, R = read_sensor() x, P = predict(x, P, F, Q) x, P = update(x, P, z, R, H) See my book Kalman and Bayesian Filters in Python [2]_. You will have to set the following attributes after constructing this object for the filter to perform properly. Please note that there are various checks in place to ensure that you have made everything the 'correct' size. However, it is possible to provide incorrectly sized arrays such that the linear algebra can not perform an operation. 
It can also fail silently - you can end up with matrices of a size that allows the linear algebra to work, but are the wrong shape for the problem you are trying to solve. Parameters ---------- dim_x : int Number of state variables for the Kalman filter. For example, if you are tracking the position and velocity of an object in two dimensions, dim_x would be 4. This is used to set the default size of P, Q, and u dim_z : int Number of of measurement inputs. For example, if the sensor provides you with position in (x,y), dim_z would be 2. dim_u : int (optional) size of the control input, if it is being used. Default value of 0 indicates it is not used. compute_log_likelihood : bool (default = True) Computes log likelihood by default, but this can be a slow computation, so if you never use it you can turn this computation off. Attributes ---------- x : numpy.array(dim_x, 1) Current state estimate. Any call to update() or predict() updates this variable. P : numpy.array(dim_x, dim_x) Current state covariance matrix. Any call to update() or predict() updates this variable. x_prior : numpy.array(dim_x, 1) Prior (predicted) state estimate. The *_prior and *_post attributes are for convenience; they store the prior and posterior of the current epoch. Read Only. P_prior : numpy.array(dim_x, dim_x) Prior (predicted) state covariance matrix. Read Only. x_post : numpy.array(dim_x, 1) Posterior (updated) state estimate. Read Only. P_post : numpy.array(dim_x, dim_x) Posterior (updated) state covariance matrix. Read Only. z : numpy.array Last measurement used in update(). Read only. R : numpy.array(dim_z, dim_z) Measurement noise covariance matrix. Also known as the observation covariance. Q : numpy.array(dim_x, dim_x) Process noise covariance matrix. Also known as the transition covariance. F : numpy.array() State Transition matrix. Also known as `A` in some formulation. H : numpy.array(dim_z, dim_x) Measurement function. Also known as the observation matrix, or as `C`. y : numpy.array Residual of the update step. Read only. K : numpy.array(dim_x, dim_z) Kalman gain of the update step. Read only. S : numpy.array System uncertainty (P projected to measurement space). Read only. SI : numpy.array Inverse system uncertainty. Read only. log_likelihood : float log-likelihood of the last measurement. Read only. likelihood : float likelihood of last measurement. Read only. Computed from the log-likelihood. The log-likelihood can be very small, meaning a large negative value such as -28000. Taking the exp() of that results in 0.0, which can break typical algorithms which multiply by this value, so by default we always return a number >= sys.float_info.min. mahalanobis : float mahalanobis distance of the innovation. Read only. inv : function, default numpy.linalg.inv If you prefer another inverse function, such as the Moore-Penrose pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv This is only used to invert self.S. If you know it is diagonal, you might choose to set it to filterpy.common.inv_diagonal, which is several times faster than numpy.linalg.inv for diagonal matrices. alpha : float Fading memory setting. 1.0 gives the normal Kalman filter, and values slightly larger than 1.0 (such as 1.02) give a fading memory effect - previous measurements have less influence on the filter's estimates. This formulation of the Fading memory filter (there are many) is due to Dan Simon [1]_. References ---------- .. [1] Dan Simon. "Optimal State Estimation." John Wiley & Sons. p. 208-212. (2006) .. 
[2] Roger Labbe. "Kalman and Bayesian Filters in Python" https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python """ def __init__(self, dim_x, dim_z, dim_u=0): if dim_x < 1: raise ValueError("dim_x must be 1 or greater") if dim_z < 1: raise ValueError("dim_z must be 1 or greater") if dim_u < 0: raise ValueError("dim_u must be 0 or greater") self.dim_x = dim_x self.dim_z = dim_z self.dim_u = dim_u self.x = zeros((dim_x, 1)) # state self.P = eye(dim_x) # uncertainty covariance self.Q = eye(dim_x) # process uncertainty self.B = None # control transition matrix self.F = eye(dim_x) # state transition matrix self.H = zeros((dim_z, dim_x)) # measurement function self.R = eye(dim_z) # measurement uncertainty self._alpha_sq = 1.0 # fading memory control self.M = np.zeros((dim_x, dim_z)) # process-measurement cross correlation self.z = np.array([[None] * self.dim_z]).T # gain and residual are computed during the innovation step. We # save them so that in case you want to inspect them for various # purposes self.K = np.zeros((dim_x, dim_z)) # kalman gain self.y = zeros((dim_z, 1)) self.S = np.zeros((dim_z, dim_z)) # system uncertainty self.SI = np.zeros((dim_z, dim_z)) # inverse system uncertainty # identity matrix. Do not alter this. self._I = np.eye(dim_x) # these will always be a copy of x,P after predict() is called self.x_prior = self.x.copy() self.P_prior = self.P.copy() # these will always be a copy of x,P after update() is called self.x_post = self.x.copy() self.P_post = self.P.copy() # Only computed only if requested via property self._log_likelihood = log(sys.float_info.min) self._likelihood = sys.float_info.min self._mahalanobis = None # keep all observations self.history_obs = [] self.inv = np.linalg.inv self.attr_saved = None self.observed = False self.last_measurement = None def predict(self, u=None, B=None, F=None, Q=None): """ Predict next state (prior) using the Kalman filter state propagation equations. Parameters ---------- u : np.array, default 0 Optional control vector. B : np.array(dim_x, dim_u), or None Optional control transition matrix; a value of None will cause the filter to use `self.B`. F : np.array(dim_x, dim_x), or None Optional state transition matrix; a value of None will cause the filter to use `self.F`. Q : np.array(dim_x, dim_x), scalar, or None Optional process noise matrix; a value of None will cause the filter to use `self.Q`. """ if B is None: B = self.B if F is None: F = self.F if Q is None: Q = self.Q elif isscalar(Q): Q = eye(self.dim_x) * Q # x = Fx + Bu if B is not None and u is not None: self.x = dot(F, self.x) + dot(B, u) else: self.x = dot(F, self.x) # P = FPF' + Q self.P = self._alpha_sq * dot(dot(F, self.P), F.T) + Q # save prior self.x_prior = self.x.copy() self.P_prior = self.P.copy() def freeze(self): """ Save the parameters before non-observation forward """ self.attr_saved = deepcopy(self.__dict__) def apply_affine_correction(self, m, t, new_kf): """ Apply to both last state and last observation for OOS smoothing. Messy due to internal logic for kalman filter being messy. 
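Added note (inferred from the code below, not from the original docstring): `m` is the 2x2 rotation/scale part and `t` the translation of a camera motion compensation (CMC) affine, such as the 2x3 matrix A returned by CMCComputer.compute_affine() split as A[:, :2] and A[:, 2]. Positions are mapped as p -> m @ p + t, velocities are only rotated/scaled (no translation), and any state saved by freeze() is corrected the same way so that unfreeze() replays a consistent virtual trajectory.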
""" if new_kf: big_m = np.kron(np.eye(4, dtype=float), m) self.x = big_m @ self.x self.x[:2] += t self.P = big_m @ self.P @ big_m.T # If frozen, also need to update the frozen state for OOS if not self.observed and self.attr_saved is not None: self.attr_saved["x"] = big_m @ self.attr_saved["x"] self.attr_saved["x"][:2] += t self.attr_saved["P"] = big_m @ self.attr_saved["P"] @ big_m.T self.attr_saved["last_measurement"][:2] = m @ self.attr_saved["last_measurement"][:2] + t self.attr_saved["last_measurement"][2:] = m @ self.attr_saved["last_measurement"][2:] else: scale = np.linalg.norm(m[:, 0]) self.x[:2] = m @ self.x[:2] + t self.x[4:6] = m @ self.x[4:6] # self.x[2] *= scale # self.x[6] *= scale self.P[:2, :2] = m @ self.P[:2, :2] @ m.T self.P[4:6, 4:6] = m @ self.P[4:6, 4:6] @ m.T # self.P[2, 2] *= 2 * scale # self.P[6, 6] *= 2 * scale # If frozen, also need to update the frozen state for OOS if not self.observed and self.attr_saved is not None: self.attr_saved["x"][:2] = m @ self.attr_saved["x"][:2] + t self.attr_saved["x"][4:6] = m @ self.attr_saved["x"][4:6] # self.attr_saved["x"][2] *= scale # self.attr_saved["x"][6] *= scale self.attr_saved["P"][:2, :2] = m @ self.attr_saved["P"][:2, :2] @ m.T self.attr_saved["P"][4:6, 4:6] = m @ self.attr_saved["P"][4:6, 4:6] @ m.T # self.attr_saved["P"][2, 2] *= 2 * scale # self.attr_saved["P"][6, 6] *= 2 * scale self.attr_saved["last_measurement"][:2] = m @ self.attr_saved["last_measurement"][:2] + t # self.attr_saved["last_measurement"][2] *= scale def unfreeze(self): if self.attr_saved is not None: new_history = deepcopy(self.history_obs) self.__dict__ = self.attr_saved # self.history_obs = new_history self.history_obs = self.history_obs[:-1] occur = [int(d is None) for d in new_history] indices = np.where(np.array(occur) == 0)[0] index1 = indices[-2] index2 = indices[-1] # box1 = new_history[index1] box1 = self.last_measurement x1, y1, s1, r1 = box1 w1 = np.sqrt(s1 * r1) h1 = np.sqrt(s1 / r1) box2 = new_history[index2] x2, y2, s2, r2 = box2 w2 = np.sqrt(s2 * r2) h2 = np.sqrt(s2 / r2) time_gap = index2 - index1 dx = (x2 - x1) / time_gap dy = (y2 - y1) / time_gap dw = (w2 - w1) / time_gap dh = (h2 - h1) / time_gap for i in range(index2 - index1): """ The default virtual trajectory generation is by linear motion (constant speed hypothesis), you could modify this part to implement your own. """ x = x1 + (i + 1) * dx y = y1 + (i + 1) * dy w = w1 + (i + 1) * dw h = h1 + (i + 1) * dh s = w * h r = w / float(h) new_box = np.array([x, y, s, r]).reshape((4, 1)) """ I still use predict-update loop here to refresh the parameters, but this can be faster by directly modifying the internal parameters as suggested in the paper. I keep this naive but slow way for easy read and understanding """ self.update(new_box) if not i == (index2 - index1 - 1): self.predict() def update(self, z, R=None, H=None): """ Add a new measurement (z) to the Kalman filter. If z is None, nothing is computed. However, x_post and P_post are updated with the prior (x_prior, P_prior), and self.z is set to None. Parameters ---------- z : (dim_z, 1): array_like measurement for this update. z can be a scalar if dim_z is 1, otherwise it must be convertible to a column vector. If you pass in a value of H, z must be a column vector the of the correct size. R : np.array, scalar, or None Optionally provide R to override the measurement noise for this one call, otherwise self.R will be used. 
H : np.array, or None Optionally provide H to override the measurement function for this one call, otherwise self.H will be used. """ # set to None to force recompute self._log_likelihood = None self._likelihood = None self._mahalanobis = None # append the observation self.history_obs.append(z) if z is None: if self.observed: """ Got no observation so freeze the current parameters for future potential online smoothing. """ self.last_measurement = self.history_obs[-2] self.freeze() self.observed = False self.z = np.array([[None] * self.dim_z]).T self.x_post = self.x.copy() self.P_post = self.P.copy() self.y = zeros((self.dim_z, 1)) return # self.observed = True if not self.observed: """ Get observation, use online smoothing to re-update parameters """ self.unfreeze() self.observed = True if R is None: R = self.R elif isscalar(R): R = eye(self.dim_z) * R if H is None: z = reshape_z(z, self.dim_z, self.x.ndim) H = self.H # y = z - Hx # error (residual) between measurement and prediction self.y = z - dot(H, self.x) # common subexpression for speed PHT = dot(self.P, H.T) # S = HPH' + R # project system uncertainty into measurement space self.S = dot(H, PHT) + R self.SI = self.inv(self.S) # K = PH'inv(S) # map system uncertainty into kalman gain self.K = dot(PHT, self.SI) # x = x + Ky # predict new x with residual scaled by the kalman gain self.x = self.x + dot(self.K, self.y) # P = (I-KH)P(I-KH)' + KRK' # This is more numerically stable # and works for non-optimal K vs the equation # P = (I-KH)P usually seen in the literature. I_KH = self._I - dot(self.K, H) self.P = dot(dot(I_KH, self.P), I_KH.T) + dot(dot(self.K, R), self.K.T) # save measurement and posterior state self.z = deepcopy(z) self.x_post = self.x.copy() self.P_post = self.P.copy() def md_for_measurement(self, z): """Mahalanobis distance for any measurement. Should be run after a predict() call. """ z = reshape_z(z, self.dim_z, self.x.ndim) H = self.H y = z - dot(H, self.x) md = sqrt(float(dot(dot(y.T, self.SI), y))) return md def predict_steadystate(self, u=0, B=None): """ Predict state (prior) using the Kalman filter state propagation equations. Only x is updated, P is left unchanged. See update_steadystate() for a longer explanation of when to use this method. Parameters ---------- u : np.array Optional control vector. If non-zero, it is multiplied by B to create the control input into the system. B : np.array(dim_x, dim_u), or None Optional control transition matrix; a value of None will cause the filter to use `self.B`. """ if B is None: B = self.B # x = Fx + Bu if B is not None: self.x = dot(self.F, self.x) + dot(B, u) else: self.x = dot(self.F, self.x) # save prior self.x_prior = self.x.copy() self.P_prior = self.P.copy() def update_steadystate(self, z): """ Add a new measurement (z) to the Kalman filter without recomputing the Kalman gain K, the state covariance P, or the system uncertainty S. You can use this for LTI systems since the Kalman gain and covariance converge to a fixed value. Precompute these and assign them explicitly, or run the Kalman filter using the normal predict()/update() cycle until they converge. The main advantage of this call is speed. We do significantly less computation, notably avoiding a costly matrix inversion. Use in conjunction with predict_steadystate(), otherwise P will grow without bound. Parameters ---------- z : (dim_z, 1): array_like measurement for this update. z can be a scalar if dim_z is 1, otherwise it must be convertible to a column vector.
Examples -------- >>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter >>> # let filter converge on representative data, then save k and P >>> for i in range(100): >>> cv.predict() >>> cv.update([i, i, i]) >>> saved_k = np.copy(cv.K) >>> saved_P = np.copy(cv.P) later on: >>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter >>> cv.K = np.copy(saved_K) >>> cv.P = np.copy(saved_P) >>> for i in range(100): >>> cv.predict_steadystate() >>> cv.update_steadystate([i, i, i]) """ # set to None to force recompute self._log_likelihood = None self._likelihood = None self._mahalanobis = None if z is None: self.z = np.array([[None] * self.dim_z]).T self.x_post = self.x.copy() self.P_post = self.P.copy() self.y = zeros((self.dim_z, 1)) return z = reshape_z(z, self.dim_z, self.x.ndim) # y = z - Hx # error (residual) between measurement and prediction self.y = z - dot(self.H, self.x) # x = x + Ky # predict new x with residual scaled by the kalman gain self.x = self.x + dot(self.K, self.y) self.z = deepcopy(z) self.x_post = self.x.copy() self.P_post = self.P.copy() # set to None to force recompute self._log_likelihood = None self._likelihood = None self._mahalanobis = None def update_correlated(self, z, R=None, H=None): """Add a new measurement (z) to the Kalman filter assuming that process noise and measurement noise are correlated as defined in the `self.M` matrix. A partial derivation can be found in [1] If z is None, nothing is changed. Parameters ---------- z : (dim_z, 1): array_like measurement for this update. z can be a scalar if dim_z is 1, otherwise it must be convertible to a column vector. R : np.array, scalar, or None Optionally provide R to override the measurement noise for this one call, otherwise self.R will be used. H : np.array, or None Optionally provide H to override the measurement function for this one call, otherwise self.H will be used. References ---------- .. [1] Bulut, Y. (2011). Applied Kalman filter theory (Doctoral dissertation, Northeastern University). http://people.duke.edu/~hpgavin/SystemID/References/Balut-KalmanFilter-PhD-NEU-2011.pdf """ # set to None to force recompute self._log_likelihood = None self._likelihood = None self._mahalanobis = None if z is None: self.z = np.array([[None] * self.dim_z]).T self.x_post = self.x.copy() self.P_post = self.P.copy() self.y = zeros((self.dim_z, 1)) return if R is None: R = self.R elif isscalar(R): R = eye(self.dim_z) * R # rename for readability and a tiny extra bit of speed if H is None: z = reshape_z(z, self.dim_z, self.x.ndim) H = self.H # handle special case: if z is in form [[z]] but x is not a column # vector dimensions will not match if self.x.ndim == 1 and shape(z) == (1, 1): z = z[0] if shape(z) == (): # is it scalar, e.g. 
z=3 or z=np.array(3) z = np.asarray([z]) # y = z - Hx # error (residual) between measurement and prediction self.y = z - dot(H, self.x) # common subexpression for speed PHT = dot(self.P, H.T) # project system uncertainty into measurement space self.S = dot(H, PHT) + dot(H, self.M) + dot(self.M.T, H.T) + R self.SI = self.inv(self.S) # K = PH'inv(S) # map system uncertainty into kalman gain self.K = dot(PHT + self.M, self.SI) # x = x + Ky # predict new x with residual scaled by the kalman gain self.x = self.x + dot(self.K, self.y) self.P = self.P - dot(self.K, dot(H, self.P) + self.M.T) self.z = deepcopy(z) self.x_post = self.x.copy() self.P_post = self.P.copy() def batch_filter( self, zs, Fs=None, Qs=None, Hs=None, Rs=None, Bs=None, us=None, update_first=False, saver=None, ): """Batch processes a sequences of measurements. Parameters ---------- zs : list-like list of measurements at each time step `self.dt`. Missing measurements must be represented by `None`. Fs : None, list-like, default=None optional value or list of values to use for the state transition matrix F. If Fs is None then self.F is used for all epochs. Otherwise it must contain a list-like list of F's, one for each epoch. This allows you to have varying F per epoch. Qs : None, np.array or list-like, default=None optional value or list of values to use for the process error covariance Q. If Qs is None then self.Q is used for all epochs. Otherwise it must contain a list-like list of Q's, one for each epoch. This allows you to have varying Q per epoch. Hs : None, np.array or list-like, default=None optional list of values to use for the measurement matrix H. If Hs is None then self.H is used for all epochs. If Hs contains a single matrix, then it is used as H for all epochs. Otherwise it must contain a list-like list of H's, one for each epoch. This allows you to have varying H per epoch. Rs : None, np.array or list-like, default=None optional list of values to use for the measurement error covariance R. If Rs is None then self.R is used for all epochs. Otherwise it must contain a list-like list of R's, one for each epoch. This allows you to have varying R per epoch. Bs : None, np.array or list-like, default=None optional list of values to use for the control transition matrix B. If Bs is None then self.B is used for all epochs. Otherwise it must contain a list-like list of B's, one for each epoch. This allows you to have varying B per epoch. us : None, np.array or list-like, default=None optional list of values to use for the control input vector; If us is None then None is used for all epochs (equivalent to 0, or no control input). Otherwise it must contain a list-like list of u's, one for each epoch. update_first : bool, optional, default=False controls whether the order of operations is update followed by predict, or predict followed by update. Default is predict->update. saver : filterpy.common.Saver, optional filterpy.common.Saver object. If provided, saver.save() will be called after every epoch Returns ------- means : np.array((n,dim_x,1)) array of the state for each time step after the update. Each entry is an np.array. In other words `means[k,:]` is the state at step `k`. covariance : np.array((n,dim_x,dim_x)) array of the covariances for each time step after the update. In other words `covariance[k,:,:]` is the covariance at step `k`. means_predictions : np.array((n,dim_x,1)) array of the state for each time step after the predictions. Each entry is an np.array. In other words `means[k,:]` is the state at step `k`. 
covariance_predictions : np.array((n,dim_x,dim_x)) array of the covariances for each time step after the prediction. In other words `covariance[k,:,:]` is the covariance at step `k`. Examples -------- .. code-block:: Python # this example demonstrates tracking a measurement where the time # between measurements varies, as stored in dts. This requires # that F be recomputed for each epoch. The output is then smoothed # with an RTS smoother. zs = [t + random.randn()*4 for t in range (40)] Fs = [np.array([[1., dt], [0, 1]]) for dt in dts] (mu, cov, _, _) = kf.batch_filter(zs, Fs=Fs) (xs, Ps, Ks, Pps) = kf.rts_smoother(mu, cov, Fs=Fs) """ # pylint: disable=too-many-statements n = np.size(zs, 0) if Fs is None: Fs = [self.F] * n if Qs is None: Qs = [self.Q] * n if Hs is None: Hs = [self.H] * n if Rs is None: Rs = [self.R] * n if Bs is None: Bs = [self.B] * n if us is None: us = [0] * n # mean estimates from Kalman Filter if self.x.ndim == 1: means = zeros((n, self.dim_x)) means_p = zeros((n, self.dim_x)) else: means = zeros((n, self.dim_x, 1)) means_p = zeros((n, self.dim_x, 1)) # state covariances from Kalman Filter covariances = zeros((n, self.dim_x, self.dim_x)) covariances_p = zeros((n, self.dim_x, self.dim_x)) if update_first: for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)): self.update(z, R=R, H=H) means[i, :] = self.x covariances[i, :, :] = self.P self.predict(u=u, B=B, F=F, Q=Q) means_p[i, :] = self.x covariances_p[i, :, :] = self.P if saver is not None: saver.save() else: for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)): self.predict(u=u, B=B, F=F, Q=Q) means_p[i, :] = self.x covariances_p[i, :, :] = self.P self.update(z, R=R, H=H) means[i, :] = self.x covariances[i, :, :] = self.P if saver is not None: saver.save() return (means, covariances, means_p, covariances_p) def rts_smoother(self, Xs, Ps, Fs=None, Qs=None, inv=np.linalg.inv): """ Runs the Rauch-Tung-Striebel Kalman smoother on a set of means and covariances computed by a Kalman filter. The usual input would come from the output of `KalmanFilter.batch_filter()`. Parameters ---------- Xs : numpy.array array of the means (state variable x) of the output of a Kalman filter. Ps : numpy.array array of the covariances of the output of a kalman filter. Fs : list-like collection of numpy.array, optional State transition matrix of the Kalman filter at each time step. Optional, if not provided the filter's self.F will be used Qs : list-like collection of numpy.array, optional Process noise of the Kalman filter at each time step. Optional, if not provided the filter's self.Q will be used inv : function, default numpy.linalg.inv If you prefer another inverse function, such as the Moore-Penrose pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv Returns ------- x : numpy.ndarray smoothed means P : numpy.ndarray smoothed state covariances K : numpy.ndarray smoother gain at each step Pp : numpy.ndarray Predicted state covariances Examples -------- ..
code-block:: Python zs = [t + random.randn()*4 for t in range (40)] (mu, cov, _, _) = kalman.batch_filter(zs) (x, P, K, Pp) = rts_smoother(mu, cov, kf.F, kf.Q) """ if len(Xs) != len(Ps): raise ValueError("length of Xs and Ps must be the same") n = Xs.shape[0] dim_x = Xs.shape[1] if Fs is None: Fs = [self.F] * n if Qs is None: Qs = [self.Q] * n # smoother gain K = zeros((n, dim_x, dim_x)) x, P, Pp = Xs.copy(), Ps.copy(), Ps.copy() for k in range(n - 2, -1, -1): Pp[k] = dot(dot(Fs[k + 1], P[k]), Fs[k + 1].T) + Qs[k + 1] # pylint: disable=bad-whitespace K[k] = dot(dot(P[k], Fs[k + 1].T), inv(Pp[k])) x[k] += dot(K[k], x[k + 1] - dot(Fs[k + 1], x[k])) P[k] += dot(dot(K[k], P[k + 1] - Pp[k]), K[k].T) return (x, P, K, Pp) def get_prediction(self, u=None, B=None, F=None, Q=None): """ Predict next state (prior) using the Kalman filter state propagation equations and returns it without modifying the object. Parameters ---------- u : np.array, default 0 Optional control vector. B : np.array(dim_x, dim_u), or None Optional control transition matrix; a value of None will cause the filter to use `self.B`. F : np.array(dim_x, dim_x), or None Optional state transition matrix; a value of None will cause the filter to use `self.F`. Q : np.array(dim_x, dim_x), scalar, or None Optional process noise matrix; a value of None will cause the filter to use `self.Q`. Returns ------- (x, P) : tuple State vector and covariance array of the prediction. """ if B is None: B = self.B if F is None: F = self.F if Q is None: Q = self.Q elif isscalar(Q): Q = eye(self.dim_x) * Q # x = Fx + Bu if B is not None and u is not None: x = dot(F, self.x) + dot(B, u) else: x = dot(F, self.x) # P = FPF' + Q P = self._alpha_sq * dot(dot(F, self.P), F.T) + Q return x, P def get_update(self, z=None): """ Computes the new estimate based on measurement `z` and returns it without altering the state of the filter. Parameters ---------- z : (dim_z, 1): array_like measurement for this update. z can be a scalar if dim_z is 1, otherwise it must be convertible to a column vector. Returns ------- (x, P) : tuple State vector and covariance array of the update. """ if z is None: return self.x, self.P z = reshape_z(z, self.dim_z, self.x.ndim) R = self.R H = self.H P = self.P x = self.x # error (residual) between measurement and prediction y = z - dot(H, x) # common subexpression for speed PHT = dot(P, H.T) # project system uncertainty into measurement space S = dot(H, PHT) + R # map system uncertainty into kalman gain K = dot(PHT, self.inv(S)) # predict new x with residual scaled by the kalman gain x = x + dot(K, y) # P = (I-KH)P(I-KH)' + KRK' I_KH = self._I - dot(K, H) P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T) return x, P def residual_of(self, z): """ Returns the residual for the given measurement (z). Does not alter the state of the filter. """ z = reshape_z(z, self.dim_z, self.x.ndim) return z - dot(self.H, self.x_prior) def measurement_of_state(self, x): """ Helper function that converts a state into a measurement. Parameters ---------- x : np.array kalman state vector Returns ------- z : (dim_z, 1): array_like measurement for this update. z can be a scalar if dim_z is 1, otherwise it must be convertible to a column vector. """ return dot(self.H, x) @property def log_likelihood(self): """ log-likelihood of the last measurement. """ if self._log_likelihood is None: self._log_likelihood = logpdf(x=self.y, cov=self.S) return self._log_likelihood @property def likelihood(self): """ Computed from the log-likelihood. 
The log-likelihood can be very small, meaning a large negative value such as -28000. Taking the exp() of that results in 0.0, which can break typical algorithms which multiply by this value, so by default we always return a number >= sys.float_info.min. """ if self._likelihood is None: self._likelihood = exp(self.log_likelihood) if self._likelihood == 0: self._likelihood = sys.float_info.min return self._likelihood @property def mahalanobis(self): """ Mahalanobis distance of measurement. E.g. 3 means measurement was 3 standard deviations away from the predicted value. Returns ------- mahalanobis : float """ if self._mahalanobis is None: self._mahalanobis = sqrt(float(dot(dot(self.y.T, self.SI), self.y))) return self._mahalanobis @property def alpha(self): """ Fading memory setting. 1.0 gives the normal Kalman filter, and values slightly larger than 1.0 (such as 1.02) give a fading memory effect - previous measurements have less influence on the filter's estimates. This formulation of the Fading memory filter (there are many) is due to Dan Simon [1]_. """ return self._alpha_sq**0.5 def log_likelihood_of(self, z): """ log likelihood of the measurement `z`. This should only be called after a call to update(). Calling after predict() will yield an incorrect result.""" if z is None: return log(sys.float_info.min) return logpdf(z, dot(self.H, self.x), self.S) @alpha.setter def alpha(self, value): if not np.isscalar(value) or value < 1: raise ValueError("alpha must be a float greater than 1") self._alpha_sq = value**2 def __repr__(self): return "\n".join( [ "KalmanFilter object", pretty_str("dim_x", self.dim_x), pretty_str("dim_z", self.dim_z), pretty_str("dim_u", self.dim_u), pretty_str("x", self.x), pretty_str("P", self.P), pretty_str("x_prior", self.x_prior), pretty_str("P_prior", self.P_prior), pretty_str("x_post", self.x_post), pretty_str("P_post", self.P_post), pretty_str("F", self.F), pretty_str("Q", self.Q), pretty_str("R", self.R), pretty_str("H", self.H), pretty_str("K", self.K), pretty_str("y", self.y), pretty_str("S", self.S), pretty_str("SI", self.SI), pretty_str("M", self.M), pretty_str("B", self.B), pretty_str("z", self.z), pretty_str("log-likelihood", self.log_likelihood), pretty_str("likelihood", self.likelihood), pretty_str("mahalanobis", self.mahalanobis), pretty_str("alpha", self.alpha), pretty_str("inv", self.inv), ] ) def test_matrix_dimensions(self, z=None, H=None, R=None, F=None, Q=None): """ Performs a series of asserts to check that the size of everything is what it should be. This can help you debug problems in your design. If you pass in H, R, F, Q those will be used instead of this object's value for those matrices. Testing `z` (the measurement) is problematic. x is a vector, and can be implemented as either a 1D array or as a nx1 column vector. Thus Hx can be of different shapes. Then, if Hx is a single value, it can be either a 1D array or 2D vector. If either is true, z can reasonably be a scalar (either '3' or np.array('3') are scalars under this definition), a 1D, 1 element array, or a 2D, 1 element array. You are allowed to pass in any combination that works.
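Illustrative usage (added sketch, not from the original docs):

    kf = KalmanFilterNew(dim_x=4, dim_z=2)
    kf.test_matrix_dimensions()                          # check the default matrices
    kf.test_matrix_dimensions(z=np.array([[1.], [2.]]))  # check a column-vector z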
""" if H is None: H = self.H if R is None: R = self.R if F is None: F = self.F if Q is None: Q = self.Q x = self.x P = self.P assert x.ndim == 1 or x.ndim == 2, "x must have one or two dimensions, but has {}".format(x.ndim) if x.ndim == 1: assert x.shape[0] == self.dim_x, "Shape of x must be ({},{}), but is {}".format(self.dim_x, 1, x.shape) else: assert x.shape == ( self.dim_x, 1, ), "Shape of x must be ({},{}), but is {}".format(self.dim_x, 1, x.shape) assert P.shape == ( self.dim_x, self.dim_x, ), "Shape of P must be ({},{}), but is {}".format(self.dim_x, self.dim_x, P.shape) assert Q.shape == ( self.dim_x, self.dim_x, ), "Shape of Q must be ({},{}), but is {}".format(self.dim_x, self.dim_x, P.shape) assert F.shape == ( self.dim_x, self.dim_x, ), "Shape of F must be ({},{}), but is {}".format(self.dim_x, self.dim_x, F.shape) assert np.ndim(H) == 2, "Shape of H must be (dim_z, {}), but is {}".format(P.shape[0], shape(H)) assert H.shape[1] == P.shape[0], "Shape of H must be (dim_z, {}), but is {}".format(P.shape[0], H.shape) # shape of R must be the same as HPH' hph_shape = (H.shape[0], H.shape[0]) r_shape = shape(R) if H.shape[0] == 1: # r can be scalar, 1D, or 2D in this case assert r_shape in [ (), (1,), (1, 1), ], "R must be scalar or one element array, but is shaped {}".format(r_shape) else: assert r_shape == hph_shape, "shape of R should be {} but it is {}".format(hph_shape, r_shape) if z is not None: z_shape = shape(z) else: z_shape = (self.dim_z, 1) # H@x must have shape of z Hx = dot(H, x) if z_shape == (): # scalar or np.array(scalar) assert Hx.ndim == 1 or shape(Hx) == ( 1, 1, ), "shape of z should be {}, not {} for the given H".format(shape(Hx), z_shape) elif shape(Hx) == (1,): assert z_shape[0] == 1, "Shape of z must be {} for the given H".format(shape(Hx)) else: assert z_shape == shape(Hx) or ( len(z_shape) == 1 and shape(Hx) == (z_shape[0], 1) ), "shape of z should be {}, not {} for the given H".format(shape(Hx), z_shape) if np.ndim(Hx) > 1 and shape(Hx) != (1, 1): assert shape(Hx) == z_shape, "shape of z should be {} for the given H, but it is {}".format( shape(Hx), z_shape ) def update(x, P, z, R, H=None, return_all=False): """ Add a new measurement (z) to the Kalman filter. If z is None, nothing is changed. This can handle either the multidimensional or unidimensional case. If all parameters are floats instead of arrays the filter will still work, and return floats for x, P as the result. update(1, 2, 1, 1, 1) # univariate update(x, P, 1 Parameters ---------- x : numpy.array(dim_x, 1), or float State estimate vector P : numpy.array(dim_x, dim_x), or float Covariance matrix z : (dim_z, 1): array_like measurement for this update. z can be a scalar if dim_z is 1, otherwise it must be convertible to a column vector. R : numpy.array(dim_z, dim_z), or float Measurement noise matrix H : numpy.array(dim_x, dim_x), or float, optional Measurement function. If not provided, a value of 1 is assumed. return_all : bool, default False If true, y, K, S, and log_likelihood are returned, otherwise only x and P are returned. Returns ------- x : numpy.array Posterior state estimate vector P : numpy.array Posterior covariance matrix y : numpy.array or scalar Residua. 
Difference between measurement and state in measurement space K : numpy.array Kalman gain S : numpy.array System uncertainty in measurement space log_likelihood : float log likelihood of the measurement """ # pylint: disable=bare-except if z is None: if return_all: return x, P, None, None, None, None return x, P if H is None: H = np.array([1]) if np.isscalar(H): H = np.array([H]) Hx = np.atleast_1d(dot(H, x)) z = reshape_z(z, Hx.shape[0], x.ndim) # error (residual) between measurement and prediction y = z - Hx # project system uncertainty into measurement space S = dot(dot(H, P), H.T) + R # map system uncertainty into kalman gain try: K = dot(dot(P, H.T), linalg.inv(S)) except: # can't invert a 1D array, annoyingly K = dot(dot(P, H.T), 1.0 / S) # predict new x with residual scaled by the kalman gain x = x + dot(K, y) # P = (I-KH)P(I-KH)' + KRK' KH = dot(K, H) try: I_KH = np.eye(KH.shape[0]) - KH except: I_KH = np.array([1 - KH]) P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T) if return_all: # compute log likelihood log_likelihood = logpdf(z, dot(H, x), S) return x, P, y, K, S, log_likelihood return x, P def update_steadystate(x, z, K, H=None): """ Add a new measurement (z) to the Kalman filter. If z is None, nothing is changed. Parameters ---------- x : numpy.array(dim_x, 1), or float State estimate vector z : (dim_z, 1): array_like measurement for this update. z can be a scalar if dim_z is 1, otherwise it must be convertible to a column vector. K : numpy.array, or float Kalman gain matrix H : numpy.array(dim_z, dim_x), or float, optional Measurement function. If not provided, a value of 1 is assumed. Returns ------- x : numpy.array Posterior state estimate vector Examples -------- This can handle either the multidimensional or unidimensional case. If all parameters are floats instead of arrays the filter will still work, and return a float for x as the result. >>> update_steadystate(1, 2, 1) # univariate >>> update_steadystate(x, z, K, H) """ if z is None: return x if H is None: H = np.array([1]) if np.isscalar(H): H = np.array([H]) Hx = np.atleast_1d(dot(H, x)) z = reshape_z(z, Hx.shape[0], x.ndim) # error (residual) between measurement and prediction y = z - Hx # estimate new x with residual scaled by the kalman gain return x + dot(K, y) def predict(x, P, F=1, Q=0, u=0, B=1, alpha=1.0): """ Predict next state (prior) using the Kalman filter state propagation equations. Parameters ---------- x : numpy.array State estimate vector P : numpy.array Covariance matrix F : numpy.array() State Transition matrix Q : numpy.array, Optional Process noise matrix u : numpy.array, Optional, default 0. Control vector. If non-zero, it is multiplied by B to create the control input into the system. B : numpy.array, optional, default 1. Control transition matrix. alpha : float, Optional, default=1.0 Fading memory setting. 1.0 gives the normal Kalman filter, and values slightly larger than 1.0 (such as 1.02) give a fading memory effect - previous measurements have less influence on the filter's estimates. This formulation of the Fading memory filter (there are many) is due to Dan Simon. Returns ------- x : numpy.array Prior state estimate vector P : numpy.array Prior covariance matrix """ if np.isscalar(F): F = np.array(F) x = dot(F, x) + dot(B, u) P = (alpha * alpha) * dot(dot(F, P), F.T) + Q return x, P def predict_steadystate(x, F=1, u=0, B=1): """ Predict next state (prior) using the Kalman filter state propagation equations.
This steady state form only computes x, assuming that the covariance is constant. Parameters ---------- x : numpy.array State estimate vector P : numpy.array Covariance matrix F : numpy.array() State Transition matrix u : numpy.array, Optional, default 0. Control vector. If non-zero, it is multiplied by B to create the control input into the system. B : numpy.array, optional, default 1. Control transition matrix. Returns ------- x : numpy.array Prior state estimate vector """ if np.isscalar(F): F = np.array(F) x = dot(F, x) + dot(B, u) return x def batch_filter(x, P, zs, Fs, Qs, Hs, Rs, Bs=None, us=None, update_first=False, saver=None): """ Batch processes a sequence of measurements. Parameters ---------- zs : list-like list of measurements at each time step. Missing measurements must be represented by None. Fs : list-like list of values to use for the state transition matrix. Qs : list-like list of values to use for the process error covariance. Hs : list-like list of values to use for the measurement matrix. Rs : list-like list of values to use for the measurement error covariance. Bs : list-like, optional list of values to use for the control transition matrix; if Bs is None, 0 is used for every time step. us : list-like, optional list of values to use for the control input vector; if us is None, 0 is used for every time step. update_first : bool, optional controls whether the order of operations is update followed by predict, or predict followed by update. Default is predict->update. saver : filterpy.common.Saver, optional filterpy.common.Saver object. If provided, saver.save() will be called after every epoch. Returns ------- means : np.array((n,dim_x,1)) array of the state for each time step after the update. Each entry is an np.array. In other words `means[k,:]` is the state at step `k`. covariance : np.array((n,dim_x,dim_x)) array of the covariances for each time step after the update. In other words `covariance[k,:,:]` is the covariance at step `k`. means_predictions : np.array((n,dim_x,1)) array of the state for each time step after the predictions. Each entry is an np.array. In other words `means_predictions[k,:]` is the state at step `k`. covariance_predictions : np.array((n,dim_x,dim_x)) array of the covariances for each time step after the prediction. In other words `covariance_predictions[k,:,:]` is the covariance at step `k`. Examples -------- ..
code-block:: Python zs = [t + random.randn()*4 for t in range (40)] Fs = [kf.F for t in range (40)] Hs = [kf.H for t in range (40)] (mu, cov, _, _) = kf.batch_filter(zs, Rs=R_list, Fs=Fs, Hs=Hs, Qs=None, Bs=None, us=None, update_first=False) (xs, Ps, Ks, Pps) = kf.rts_smoother(mu, cov, Fs=Fs, Qs=None) """ n = np.size(zs, 0) dim_x = x.shape[0] # mean estimates from Kalman Filter if x.ndim == 1: means = zeros((n, dim_x)) means_p = zeros((n, dim_x)) else: means = zeros((n, dim_x, 1)) means_p = zeros((n, dim_x, 1)) # state covariances from Kalman Filter covariances = zeros((n, dim_x, dim_x)) covariances_p = zeros((n, dim_x, dim_x)) if us is None: us = [0.0] * n if Bs is None: Bs = [0.0] * n if update_first: for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)): x, P = update(x, P, z, R=R, H=H) means[i, :] = x covariances[i, :, :] = P x, P = predict(x, P, u=u, B=B, F=F, Q=Q) means_p[i, :] = x covariances_p[i, :, :] = P if saver is not None: saver.save() else: for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)): x, P = predict(x, P, u=u, B=B, F=F, Q=Q) means_p[i, :] = x covariances_p[i, :, :] = P x, P = update(x, P, z, R=R, H=H) means[i, :] = x covariances[i, :, :] = P if saver is not None: saver.save() return (means, covariances, means_p, covariances_p) def rts_smoother(Xs, Ps, Fs, Qs): """ Runs the Rauch-Tung-Striebel Kalman smoother on a set of means and covariances computed by a Kalman filter. The usual input would come from the output of `KalmanFilter.batch_filter()`. Parameters ---------- Xs : numpy.array array of the means (state variable x) of the output of a Kalman filter. Ps : numpy.array array of the covariances of the output of a kalman filter. Fs : list-like collection of numpy.array State transition matrix of the Kalman filter at each time step. Qs : list-like collection of numpy.array, optional Process noise of the Kalman filter at each time step. Returns ------- x : numpy.ndarray smoothed means P : numpy.ndarray smoothed state covariances K : numpy.ndarray smoother gain at each step pP : numpy.ndarray predicted state covariances Examples -------- ..
code-block:: Python zs = [t + random.randn()*4 for t in range (40)] (mu, cov, _, _) = kalman.batch_filter(zs) (x, P, K, pP) = rts_smoother(mu, cov, kf.F, kf.Q) """ if len(Xs) != len(Ps): raise ValueError("length of Xs and Ps must be the same") n = Xs.shape[0] dim_x = Xs.shape[1] # smoother gain K = zeros((n, dim_x, dim_x)) x, P, pP = Xs.copy(), Ps.copy(), Ps.copy() for k in range(n - 2, -1, -1): pP[k] = dot(dot(Fs[k], P[k]), Fs[k].T) + Qs[k] # pylint: disable=bad-whitespace K[k] = dot(dot(P[k], Fs[k].T), linalg.inv(pP[k])) x[k] += dot(K[k], x[k + 1] - dot(Fs[k], x[k])) P[k] += dot(dot(K[k], P[k + 1] - pP[k]), K[k].T) return (x, P, K, pP) ================================================ FILE: DLTA_AI_app/trackers/deepocsort/ocsort.py ================================================ """ This script is adopted from the SORT script by Alex Bewley alex@bewley.ai """ from __future__ import print_function import pdb import pickle import cv2 import torch import torchvision import numpy as np from .association import * from .embedding import EmbeddingComputer from .cmc import CMCComputer from .reid_multibackend import ReIDDetectMultiBackend from ultralytics.yolo.utils.ops import xyxy2xywh def k_previous_obs(observations, cur_age, k): if len(observations) == 0: return [-1, -1, -1, -1, -1] for i in range(k): dt = k - i if cur_age - dt in observations: return observations[cur_age - dt] max_age = max(observations.keys()) return observations[max_age] def convert_bbox_to_z(bbox): """ Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is the aspect ratio """ w = bbox[2] - bbox[0] h = bbox[3] - bbox[1] x = bbox[0] + w / 2.0 y = bbox[1] + h / 2.0 s = w * h # scale is just area r = w / float(h + 1e-6) return np.array([x, y, s, r]).reshape((4, 1)) def convert_bbox_to_z_new(bbox): w = bbox[2] - bbox[0] h = bbox[3] - bbox[1] x = bbox[0] + w / 2.0 y = bbox[1] + h / 2.0 return np.array([x, y, w, h]).reshape((4, 1)) def convert_x_to_bbox_new(x): x, y, w, h = x.reshape(-1)[:4] return np.array([x - w / 2, y - h / 2, x + w / 2, y + h / 2]).reshape(1, 4) def convert_x_to_bbox(x, score=None): """ Takes a bounding box in the centre form [x,y,s,r] and returns it in the form [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right """ w = np.sqrt(x[2] * x[3]) h = x[2] / w if score == None: return np.array([x[0] - w / 2.0, x[1] - h / 2.0, x[0] + w / 2.0, x[1] + h / 2.0]).reshape((1, 4)) else: return np.array([x[0] - w / 2.0, x[1] - h / 2.0, x[0] + w / 2.0, x[1] + h / 2.0, score]).reshape((1, 5)) def speed_direction(bbox1, bbox2): cx1, cy1 = (bbox1[0] + bbox1[2]) / 2.0, (bbox1[1] + bbox1[3]) / 2.0 cx2, cy2 = (bbox2[0] + bbox2[2]) / 2.0, (bbox2[1] + bbox2[3]) / 2.0 speed = np.array([cy2 - cy1, cx2 - cx1]) norm = np.sqrt((cy2 - cy1) ** 2 + (cx2 - cx1) ** 2) + 1e-6 return speed / norm def new_kf_process_noise(w, h, p=1 / 20, v=1 / 160): Q = np.diag( ((p * w) ** 2, (p * h) ** 2, (p * w) ** 2, (p * h) ** 2, (v * w) ** 2, (v * h) ** 2, (v * w) ** 2, (v * h) ** 2) ) return Q def new_kf_measurement_noise(w, h, m=1 / 20): w_var = (m * w) ** 2 h_var = (m * h) ** 2 R = np.diag((w_var, h_var, w_var, h_var)) return R class KalmanBoxTracker(object): """ This class represents the internal state of individual tracked objects observed as bbox. """ count = 0 def __init__(self, bbox, cls, delta_t=3, orig=False, emb=None, alpha=0, new_kf=False): """ Initialises a tracker using initial bounding box. 
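`bbox` is expected as [x1, y1, x2, y2, score] (the trailing score becomes self.conf) and `cls` is the detection's class id. An illustrative call (values are made up):

    KalmanBoxTracker(np.array([10., 20., 50., 80., 0.9]), cls=0, delta_t=3)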
""" # define constant velocity model if not orig: from .kalmanfilter import KalmanFilterNew as KalmanFilter else: from filterpy.kalman import KalmanFilter self.cls = cls self.conf = bbox[-1] self.new_kf = new_kf if new_kf: self.kf = KalmanFilter(dim_x=8, dim_z=4) self.kf.F = np.array( [ # x y w h x' y' w' h' [1, 0, 0, 0, 1, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 1, 0], [0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 1], ] ) self.kf.H = np.array( [ [1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0], ] ) _, _, w, h = convert_bbox_to_z_new(bbox).reshape(-1) self.kf.P = new_kf_process_noise(w, h) self.kf.P[:4, :4] *= 4 self.kf.P[4:, 4:] *= 100 # Process and measurement uncertainty happen in functions self.bbox_to_z_func = convert_bbox_to_z_new self.x_to_bbox_func = convert_x_to_bbox_new else: self.kf = KalmanFilter(dim_x=7, dim_z=4) self.kf.F = np.array( [ # x y s r x' y' s' [1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1], ] ) self.kf.H = np.array( [ [1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], ] ) self.kf.R[2:, 2:] *= 10.0 self.kf.P[4:, 4:] *= 1000.0 # give high uncertainty to the unobservable initial velocities self.kf.P *= 10.0 self.kf.Q[-1, -1] *= 0.01 self.kf.Q[4:, 4:] *= 0.01 self.bbox_to_z_func = convert_bbox_to_z self.x_to_bbox_func = convert_x_to_bbox self.kf.x[:4] = self.bbox_to_z_func(bbox) self.time_since_update = 0 self.id = KalmanBoxTracker.count KalmanBoxTracker.count += 1 self.history = [] self.hits = 0 self.hit_streak = 0 self.age = 0 """ NOTE: [-1,-1,-1,-1,-1] is a compromising placeholder for non-observation status, the same for the return of function k_previous_obs. It is ugly and I do not like it. But to support generate observation array in a fast and unified way, which you would see below k_observations = np.array([k_previous_obs(...]]), let's bear it for now. """ # Used for OCR self.last_observation = np.array([-1, -1, -1, -1, -1]) # placeholder # Used to output track after min_hits reached self.history_observations = [] # Used for velocity self.observations = dict() self.velocity = None self.delta_t = delta_t self.emb = emb self.frozen = False def update(self, bbox, cls): """ Updates the state vector with observed bbox. """ if bbox is not None: self.frozen = False self.cls = cls if self.last_observation.sum() >= 0: # no previous observation previous_box = None for dt in range(self.delta_t, 0, -1): if self.age - dt in self.observations: previous_box = self.observations[self.age - dt] break if previous_box is None: previous_box = self.last_observation """ Estimate the track speed direction with observations \Delta t steps away """ self.velocity = speed_direction(previous_box, bbox) """ Insert new observations. This is a ugly way to maintain both self.observations and self.history_observations. Bear it for the moment. 
""" self.last_observation = bbox self.observations[self.age] = bbox self.history_observations.append(bbox) self.time_since_update = 0 self.history = [] self.hits += 1 self.hit_streak += 1 if self.new_kf: R = new_kf_measurement_noise(self.kf.x[2, 0], self.kf.x[3, 0]) self.kf.update(self.bbox_to_z_func(bbox), R=R) else: self.kf.update(self.bbox_to_z_func(bbox)) else: self.kf.update(bbox) self.frozen = True def update_emb(self, emb, alpha=0.9): self.emb = alpha * self.emb + (1 - alpha) * emb self.emb /= np.linalg.norm(self.emb) def get_emb(self): return self.emb.cpu() def apply_affine_correction(self, affine): m = affine[:, :2] t = affine[:, 2].reshape(2, 1) # For OCR if self.last_observation.sum() > 0: ps = self.last_observation[:4].reshape(2, 2).T ps = m @ ps + t self.last_observation[:4] = ps.T.reshape(-1) # Apply to each box in the range of velocity computation for dt in range(self.delta_t, -1, -1): if self.age - dt in self.observations: ps = self.observations[self.age - dt][:4].reshape(2, 2).T ps = m @ ps + t self.observations[self.age - dt][:4] = ps.T.reshape(-1) # Also need to change kf state, but might be frozen self.kf.apply_affine_correction(m, t, self.new_kf) def predict(self): """ Advances the state vector and returns the predicted bounding box estimate. """ # Don't allow negative bounding boxes if self.new_kf: if self.kf.x[2] + self.kf.x[6] <= 0: self.kf.x[6] = 0 if self.kf.x[3] + self.kf.x[7] <= 0: self.kf.x[7] = 0 # Stop velocity, will update in kf during OOS if self.frozen: self.kf.x[6] = self.kf.x[7] = 0 Q = new_kf_process_noise(self.kf.x[2, 0], self.kf.x[3, 0]) else: if (self.kf.x[6] + self.kf.x[2]) <= 0: self.kf.x[6] *= 0.0 Q = None self.kf.predict(Q=Q) self.age += 1 if self.time_since_update > 0: self.hit_streak = 0 self.time_since_update += 1 self.history.append(self.x_to_bbox_func(self.kf.x)) return self.history[-1] def get_state(self): """ Returns the current bounding box estimate. """ return self.x_to_bbox_func(self.kf.x) def mahalanobis(self, bbox): """Should be run after a predict() call for accuracy.""" return self.kf.md_for_measurement(self.bbox_to_z_func(bbox)) """ We support multiple ways for association cost calculation, by default we use IoU. GIoU may have better performance in some situations. We note that we hardly normalize the cost by all methods to (0,1) which may not be the best practice. 
""" ASSO_FUNCS = { "iou": iou_batch, "giou": giou_batch, "ciou": ciou_batch, "diou": diou_batch, "ct_dist": ct_dist, } class OCSort(object): def __init__( self, model_weights, device, fp16, det_thresh, max_age=30, min_hits=3, iou_threshold=0.3, delta_t=3, asso_func="iou", inertia=0.2, w_association_emb=0.75, alpha_fixed_emb=0.95, aw_param=0.5, embedding_off=False, cmc_off=False, aw_off=False, new_kf_off=False, **kwargs ): """ Sets key parameters for SORT """ self.max_age = max_age self.min_hits = min_hits self.iou_threshold = iou_threshold self.trackers = [] self.frame_count = 0 self.det_thresh = det_thresh self.delta_t = delta_t self.asso_func = ASSO_FUNCS[asso_func] self.inertia = inertia self.w_association_emb = w_association_emb self.alpha_fixed_emb = alpha_fixed_emb self.aw_param = aw_param KalmanBoxTracker.count = 0 self.embedder = ReIDDetectMultiBackend(weights=model_weights, device=device, fp16=fp16) self.cmc = CMCComputer() self.embedding_off = embedding_off self.cmc_off = cmc_off self.aw_off = aw_off self.new_kf_off = new_kf_off def update(self, dets, img_numpy, tag='blub'): """ Params: dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...] Requires: this method must be called once for each frame even with empty detections (use np.empty((0, 5)) for frames without detections). Returns the a similar array, where the last column is the object ID. NOTE: The number of objects returned may differ from the number of detections provided. """ xyxys = dets[:, 0:4] scores = dets[:, 4] clss = dets[:, 5] classes = clss.numpy() xyxys = xyxys.numpy() scores = scores.numpy() dets = dets[:, 0:6].numpy() remain_inds = scores > self.det_thresh dets = dets[remain_inds] self.height, self.width = img_numpy.shape[:2] # Rescale #scale = min(img_tensor.shape[2] / img_numpy.shape[0], img_tensor.shape[3] / img_numpy.shape[1]) #dets[:, :4] /= scale # Embedding if self.embedding_off or dets.shape[0] == 0: dets_embs = np.ones((dets.shape[0], 1)) else: # (Ndets x X) [512, 1024, 2048] #dets_embs = self.embedder.compute_embedding(img_numpy, dets[:, :4], tag) dets_embs = self._get_features(dets[:, :4], img_numpy) # CMC if not self.cmc_off: transform = self.cmc.compute_affine(img_numpy, dets[:, :4], tag) for trk in self.trackers: trk.apply_affine_correction(transform) trust = (dets[:, 4] - self.det_thresh) / (1 - self.det_thresh) af = self.alpha_fixed_emb # From [self.alpha_fixed_emb, 1], goes to 1 as detector is less confident dets_alpha = af + (1 - af) * (1 - trust) # get predicted locations from existing trackers. 
trks = np.zeros((len(self.trackers), 5)) trk_embs = [] to_del = [] ret = [] for t, trk in enumerate(trks): pos = self.trackers[t].predict()[0] trk[:] = [pos[0], pos[1], pos[2], pos[3], 0] if np.any(np.isnan(pos)): to_del.append(t) else: trk_embs.append(self.trackers[t].get_emb()) trks = np.ma.compress_rows(np.ma.masked_invalid(trks)) if len(trk_embs) > 0: trk_embs = np.vstack(trk_embs) else: trk_embs = np.array(trk_embs) for t in reversed(to_del): self.trackers.pop(t) velocities = np.array([trk.velocity if trk.velocity is not None else np.array((0, 0)) for trk in self.trackers]) last_boxes = np.array([trk.last_observation for trk in self.trackers]) k_observations = np.array([k_previous_obs(trk.observations, trk.age, self.delta_t) for trk in self.trackers]) """ First round of association """ # (M detections X N tracks, final score) if self.embedding_off or dets.shape[0] == 0 or trk_embs.shape[0] == 0: stage1_emb_cost = None else: stage1_emb_cost = dets_embs @ trk_embs.T matched, unmatched_dets, unmatched_trks = associate( dets, trks, self.iou_threshold, velocities, k_observations, self.inertia, stage1_emb_cost, self.w_association_emb, self.aw_off, self.aw_param, ) for m in matched: self.trackers[m[1]].update(dets[m[0], :5], dets[m[0], 5]) self.trackers[m[1]].update_emb(dets_embs[m[0]], alpha=dets_alpha[m[0]]) """ Second round of associaton by OCR """ if unmatched_dets.shape[0] > 0 and unmatched_trks.shape[0] > 0: left_dets = dets[unmatched_dets] left_dets_embs = dets_embs[unmatched_dets] left_trks = last_boxes[unmatched_trks] left_trks_embs = trk_embs[unmatched_trks] iou_left = self.asso_func(left_dets, left_trks) # TODO: is better without this emb_cost_left = left_dets_embs @ left_trks_embs.T if self.embedding_off: emb_cost_left = np.zeros_like(emb_cost_left) iou_left = np.array(iou_left) if iou_left.max() > self.iou_threshold: """ NOTE: by using a lower threshold, e.g., self.iou_threshold - 0.1, you may get a higher performance especially on MOT17/MOT20 datasets. 
But we keep it uniform here for simplicity """ rematched_indices = linear_assignment(-iou_left) to_remove_det_indices = [] to_remove_trk_indices = [] for m in rematched_indices: det_ind, trk_ind = unmatched_dets[m[0]], unmatched_trks[m[1]] if iou_left[m[0], m[1]] < self.iou_threshold: continue self.trackers[trk_ind].update(dets[det_ind, :5], dets[det_ind, 5]) self.trackers[trk_ind].update_emb(dets_embs[det_ind], alpha=dets_alpha[det_ind]) to_remove_det_indices.append(det_ind) to_remove_trk_indices.append(trk_ind) unmatched_dets = np.setdiff1d(unmatched_dets, np.array(to_remove_det_indices)) unmatched_trks = np.setdiff1d(unmatched_trks, np.array(to_remove_trk_indices)) for m in unmatched_trks: self.trackers[m].update(None, None) # create and initialise new trackers for unmatched detections for i in unmatched_dets: trk = KalmanBoxTracker( dets[i, :5], dets[i, 5], delta_t=self.delta_t, emb=dets_embs[i], alpha=dets_alpha[i], new_kf=not self.new_kf_off ) self.trackers.append(trk) i = len(self.trackers) for trk in reversed(self.trackers): if trk.last_observation.sum() < 0: d = trk.get_state()[0] else: """ this is optional to use the recent observation or the kalman filter prediction, we didn't notice significant difference here """ d = trk.last_observation[:4] if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits): # +1 as MOT benchmark requires positive ret.append(np.concatenate((d, [trk.id + 1], [trk.cls], [trk.conf])).reshape(1, -1)) i -= 1 # remove dead tracklet if trk.time_since_update > self.max_age: self.trackers.pop(i) if len(ret) > 0: return np.concatenate(ret) return np.empty((0, 5)) def _xywh_to_xyxy(self, bbox_xywh): x, y, w, h = bbox_xywh x1 = max(int(x - w / 2), 0) x2 = min(int(x + w / 2), self.width - 1) y1 = max(int(y - h / 2), 0) y2 = min(int(y + h / 2), self.height - 1) return x1, y1, x2, y2 def _get_features(self, bbox_xyxy, ori_img): im_crops = [] for box in bbox_xyxy: x1, y1, x2, y2 = box.astype(int) im = ori_img[y1:y2, x1:x2] im_crops.append(im) if im_crops: features = self.embedder(im_crops).cpu() else: features = np.array([]) return features def update_public(self, dets, cates, scores): self.frame_count += 1 det_scores = np.ones((dets.shape[0], 1)) dets = np.concatenate((dets, det_scores), axis=1) remain_inds = scores > self.det_thresh cates = cates[remain_inds] dets = dets[remain_inds] trks = np.zeros((len(self.trackers), 5)) to_del = [] ret = [] for t, trk in enumerate(trks): pos = self.trackers[t].predict()[0] cat = self.trackers[t].cate trk[:] = [pos[0], pos[1], pos[2], pos[3], cat] if np.any(np.isnan(pos)): to_del.append(t) trks = np.ma.compress_rows(np.ma.masked_invalid(trks)) for t in reversed(to_del): self.trackers.pop(t) velocities = np.array([trk.velocity if trk.velocity is not None else np.array((0, 0)) for trk in self.trackers]) last_boxes = np.array([trk.last_observation for trk in self.trackers]) k_observations = np.array([k_previous_obs(trk.observations, trk.age, self.delta_t) for trk in self.trackers]) matched, unmatched_dets, unmatched_trks = associate_kitti( dets, trks, cates, self.iou_threshold, velocities, k_observations, self.inertia, ) for m in matched: self.trackers[m[1]].update(dets[m[0], :]) if unmatched_dets.shape[0] > 0 and unmatched_trks.shape[0] > 0: """ The re-association stage by OCR. NOTE: at this stage, adding other strategy might be able to continue improve the performance, such as BYTE association by ByteTrack. 
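The fallback below re-matches on each tracker's last observed box with a relaxed threshold (self.iou_threshold - 0.1) and applies a large negative penalty (-1e6) to cross-category pairs so they are never re-associated.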
""" left_dets = dets[unmatched_dets] left_trks = last_boxes[unmatched_trks] left_dets_c = left_dets.copy() left_trks_c = left_trks.copy() iou_left = self.asso_func(left_dets_c, left_trks_c) iou_left = np.array(iou_left) det_cates_left = cates[unmatched_dets] trk_cates_left = trks[unmatched_trks][:, 4] num_dets = unmatched_dets.shape[0] num_trks = unmatched_trks.shape[0] cate_matrix = np.zeros((num_dets, num_trks)) for i in range(num_dets): for j in range(num_trks): if det_cates_left[i] != trk_cates_left[j]: """ For some datasets, such as KITTI, there are different categories, we have to avoid associate them together. """ cate_matrix[i][j] = -1e6 iou_left = iou_left + cate_matrix if iou_left.max() > self.iou_threshold - 0.1: rematched_indices = linear_assignment(-iou_left) to_remove_det_indices = [] to_remove_trk_indices = [] for m in rematched_indices: det_ind, trk_ind = unmatched_dets[m[0]], unmatched_trks[m[1]] if iou_left[m[0], m[1]] < self.iou_threshold - 0.1: continue self.trackers[trk_ind].update(dets[det_ind, :]) to_remove_det_indices.append(det_ind) to_remove_trk_indices.append(trk_ind) unmatched_dets = np.setdiff1d(unmatched_dets, np.array(to_remove_det_indices)) unmatched_trks = np.setdiff1d(unmatched_trks, np.array(to_remove_trk_indices)) for i in unmatched_dets: trk = KalmanBoxTracker(dets[i, :]) trk.cate = cates[i] self.trackers.append(trk) i = len(self.trackers) for trk in reversed(self.trackers): if trk.last_observation.sum() > 0: d = trk.last_observation[:4] else: d = trk.get_state()[0] if trk.time_since_update < 1: if (self.frame_count <= self.min_hits) or (trk.hit_streak >= self.min_hits): # id+1 as MOT benchmark requires positive ret.append(np.concatenate((d, [trk.id + 1], [trk.cls], [trk.conf])).reshape(1, -1)) if trk.hit_streak == self.min_hits: # Head Padding (HP): recover the lost steps during initializing the track for prev_i in range(self.min_hits - 1): prev_observation = trk.history_observations[-(prev_i + 2)] ret.append( ( np.concatenate( ( prev_observation[:4], [trk.id + 1], [trk.cls], [trk.conf], ) ) ).reshape(1, -1) ) i -= 1 if trk.time_since_update > self.max_age: self.trackers.pop(i) if len(ret) > 0: return np.concatenate(ret) return np.empty((0, 7)) def dump_cache(self): self.cmc.dump_cache() self.embedder.dump_cache() ================================================ FILE: DLTA_AI_app/trackers/deepocsort/reid_multibackend.py ================================================ import torch.nn as nn import torch from pathlib import Path import numpy as np from itertools import islice import torchvision.transforms as transforms import cv2 import sys import torchvision.transforms as T from collections import OrderedDict, namedtuple import gdown from os.path import exists as file_exists from ultralytics.yolo.utils.checks import check_requirements, check_version from ultralytics.yolo.utils import LOGGER from trackers.strongsort.deep.reid_model_factory import (show_downloadeable_models, get_model_url, get_model_name, download_url, load_pretrained_weights) from trackers.strongsort.deep.models import build_model def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): # Check file(s) for acceptable suffix if file and suffix: if isinstance(suffix, str): suffix = [suffix] for f in file if isinstance(file, (list, tuple)) else [file]: s = Path(f).suffix.lower() # file suffix if len(s): assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" class ReIDDetectMultiBackend(nn.Module): # ReID models MultiBackend class for python inference on various backends def 
__init__(self, weights='osnet_x0_25_msmt17.pt', device=torch.device('cpu'), fp16=False): super().__init__() w = weights[0] if isinstance(weights, list) else weights self.pt, self.jit, self.onnx, self.xml, self.engine, self.tflite = self.model_type(w) # get backend self.fp16 = fp16 self.fp16 &= self.pt or self.jit or self.engine # FP16 # Build transform functions self.device = device self.image_size=(256, 128) self.pixel_mean=[0.485, 0.456, 0.406] self.pixel_std=[0.229, 0.224, 0.225] self.transforms = [] self.transforms += [T.Resize(self.image_size)] self.transforms += [T.ToTensor()] self.transforms += [T.Normalize(mean=self.pixel_mean, std=self.pixel_std)] self.preprocess = T.Compose(self.transforms) self.to_pil = T.ToPILImage() model_name = get_model_name(w) if w.suffix == '.pt': model_url = get_model_url(w) if not file_exists(w) and model_url is not None: gdown.download(model_url, str(w), quiet=False) elif file_exists(w): pass else: print(f'No URL associated to the chosen StrongSORT weights ({w}). Choose between:') show_downloadeable_models() exit() # Build model self.model = build_model( model_name, num_classes=1, pretrained=not (w and w.is_file()), use_gpu=device ) if self.pt: # PyTorch # populate model arch with weights if w and w.is_file() and w.suffix == '.pt': load_pretrained_weights(self.model, w) self.model.to(device).eval() self.model.half() if self.fp16 else self.model.float() elif self.jit: LOGGER.info(f'Loading {w} for TorchScript inference...') self.model = torch.jit.load(w) self.model.half() if self.fp16 else self.model.float() elif self.onnx: # ONNX Runtime LOGGER.info(f'Loading {w} for ONNX Runtime inference...') cuda = torch.cuda.is_available() and device.type != 'cpu' #check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) import onnxruntime providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] self.session = onnxruntime.InferenceSession(str(w), providers=providers) elif self.engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 if device.type == 'cpu': device = torch.device('cuda:0') Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: self.model_ = runtime.deserialize_cuda_engine(f.read()) self.context = self.model_.create_execution_context() self.bindings = OrderedDict() self.fp16 = False # default updated below dynamic = False for index in range(self.model_.num_bindings): name = self.model_.get_binding_name(index) dtype = trt.nptype(self.model_.get_binding_dtype(index)) if self.model_.binding_is_input(index): if -1 in tuple(self.model_.get_binding_shape(index)): # dynamic dynamic = True self.context.set_binding_shape(index, tuple(self.model_.get_profile_shape(0, index)[2])) if dtype == np.float16: self.fp16 = True shape = tuple(self.context.get_binding_shape(index)) im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) self.bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) self.binding_addrs = OrderedDict((n, d.ptr) for n, d in self.bindings.items()) batch_size = self.bindings['images'].shape[0] # if dynamic, this is instead max batch size elif self.xml: # OpenVINO LOGGER.info(f'Loading {w} for OpenVINO inference...') check_requirements(('openvino',)) # requires openvino-dev: 
https://pypi.org/project/openvino-dev/ from openvino.runtime import Core, Layout, get_batch ie = Core() if not Path(w).is_file(): # if not *.xml w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) if network.get_parameters()[0].get_layout().empty: network.get_parameters()[0].set_layout(Layout("NCWH")) batch_dim = get_batch(network) if batch_dim.is_static: batch_size = batch_dim.get_length() self.executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 self.output_layer = next(iter(self.executable_network.outputs)) elif self.tflite: LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu from tflite_runtime.interpreter import Interpreter, load_delegate except ImportError: import tensorflow as tf Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, self.interpreter = tf.lite.Interpreter(model_path=w) self.interpreter.allocate_tensors() # Get input and output tensors. self.input_details = self.interpreter.get_input_details() self.output_details = self.interpreter.get_output_details() # Test model on random input data. input_data = np.array(np.random.random_sample((1,256,128,3)), dtype=np.float32) self.interpreter.set_tensor(self.input_details[0]['index'], input_data) self.interpreter.invoke() # The function `get_tensor()` returns a copy of the tensor data. output_data = self.interpreter.get_tensor(self.output_details[0]['index']) else: print('This model framework is not supported yet!') exit() @staticmethod def model_type(p='path/to/model.pt'): # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx from trackers.reid_export import export_formats sf = list(export_formats().Suffix) # export suffixes check_suffix(p, sf) # checks types = [s in Path(p).name for s in sf] return types def _preprocess(self, im_batch): images = [] for element in im_batch: image = self.to_pil(element) image = self.preprocess(image) images.append(image) images = torch.stack(images, dim=0) images = images.to(self.device) return images def forward(self, im_batch): # preprocess batch im_batch = self._preprocess(im_batch) # batch to half if self.fp16 and im_batch.dtype != torch.float16: im_batch = im_batch.half() # batch processing features = [] if self.pt: features = self.model(im_batch) elif self.jit: # TorchScript features = self.model(im_batch) elif self.onnx: # ONNX Runtime im_batch = im_batch.cpu().numpy() # torch to numpy features = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im_batch})[0] elif self.engine: # TensorRT if True and im_batch.shape != self.bindings['images'].shape: i_in, i_out = (self.model_.get_binding_index(x) for x in ('images', 'output')) self.context.set_binding_shape(i_in, im_batch.shape) # reshape if dynamic self.bindings['images'] = self.bindings['images']._replace(shape=im_batch.shape) self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out))) s = self.bindings['images'].shape assert im_batch.shape == s, f"input size {im_batch.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}" self.binding_addrs['images'] = int(im_batch.data_ptr()) self.context.execute_v2(list(self.binding_addrs.values())) features = self.bindings['output'].data elif self.xml: # OpenVINO im_batch = im_batch.cpu().numpy() # FP32 features = self.executable_network([im_batch])[self.output_layer] else: print('Framework not supported at the moment, we are working on it...') exit() if isinstance(features, (list, tuple)): return self.from_numpy(features[0]) if len(features) == 1 else [self.from_numpy(x) for x in features] else: return self.from_numpy(features) def from_numpy(self, x): return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x def warmup(self, imgsz=[(256, 128, 3)]): # Warmup model by running inference once warmup_types = self.pt, self.jit, self.onnx, self.engine, self.tflite if any(warmup_types) and self.device.type != 'cpu': im = [np.empty(*imgsz).astype(np.uint8)] # input for _ in range(2 if self.jit else 1): # self.forward(im) # warmup ================================================ FILE: DLTA_AI_app/trackers/multi_tracker_zoo.py ================================================ from trackers.strongsort.utils.parser import get_config def create_tracker(tracker_type, tracker_config, reid_weights, device, half): cfg = get_config() cfg.merge_from_file(tracker_config) if tracker_type == 'strongsort': from trackers.strongsort.strong_sort import StrongSORT strongsort = StrongSORT( reid_weights, device, half, max_dist=cfg.strongsort.max_dist, max_iou_dist=cfg.strongsort.max_iou_dist, max_age=cfg.strongsort.max_age, max_unmatched_preds=cfg.strongsort.max_unmatched_preds, n_init=cfg.strongsort.n_init, nn_budget=cfg.strongsort.nn_budget, mc_lambda=cfg.strongsort.mc_lambda, ema_alpha=cfg.strongsort.ema_alpha, ) return strongsort elif tracker_type == 'ocsort': from trackers.ocsort.ocsort import OCSort ocsort = OCSort( det_thresh=cfg.ocsort.det_thresh, max_age=cfg.ocsort.max_age, min_hits=cfg.ocsort.min_hits, iou_threshold=cfg.ocsort.iou_thresh, delta_t=cfg.ocsort.delta_t, 
asso_func=cfg.ocsort.asso_func, inertia=cfg.ocsort.inertia, use_byte=cfg.ocsort.use_byte, ) return ocsort elif tracker_type == 'bytetrack': from trackers.bytetrack.byte_tracker import BYTETracker bytetracker = BYTETracker( track_thresh=cfg.bytetrack.track_thresh, match_thresh=cfg.bytetrack.match_thresh, track_buffer=cfg.bytetrack.track_buffer, frame_rate=cfg.bytetrack.frame_rate ) return bytetracker elif tracker_type == 'botsort': from trackers.botsort.bot_sort import BoTSORT botsort = BoTSORT( reid_weights, device, half, track_high_thresh=cfg.botsort.track_high_thresh, new_track_thresh=cfg.botsort.new_track_thresh, track_buffer =cfg.botsort.track_buffer, match_thresh=cfg.botsort.match_thresh, proximity_thresh=cfg.botsort.proximity_thresh, appearance_thresh=cfg.botsort.appearance_thresh, cmc_method =cfg.botsort.cmc_method, frame_rate=cfg.botsort.frame_rate, lambda_=cfg.botsort.lambda_ ) return botsort elif tracker_type == 'deepocsort': from trackers.deepocsort.ocsort import OCSort botsort = OCSort( reid_weights, device, half, det_thresh=cfg.deepocsort.det_thresh, max_age=cfg.deepocsort.max_age, min_hits=cfg.deepocsort.min_hits, iou_threshold=cfg.deepocsort.iou_thresh, delta_t=cfg.deepocsort.delta_t, asso_func=cfg.deepocsort.asso_func, inertia=cfg.deepocsort.inertia, ) return botsort else: print('No such tracker') exit() ================================================ FILE: DLTA_AI_app/trackers/ocsort/association.py ================================================ import os import numpy as np def iou_batch(bboxes1, bboxes2): """ From SORT: Computes IOU between two bboxes in the form [x1,y1,x2,y2] """ bboxes2 = np.expand_dims(bboxes2, 0) bboxes1 = np.expand_dims(bboxes1, 1) xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0]) yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1]) xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2]) yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3]) w = np.maximum(0., xx2 - xx1) h = np.maximum(0., yy2 - yy1) wh = w * h o = wh / ((bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1]) + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) - wh) return(o) def giou_batch(bboxes1, bboxes2): """ :param bbox_p: predict of bbox(N,4)(x1,y1,x2,y2) :param bbox_g: groundtruth of bbox(N,4)(x1,y1,x2,y2) :return: """ # for details should go to https://arxiv.org/pdf/1902.09630.pdf # ensure predict's bbox form bboxes2 = np.expand_dims(bboxes2, 0) bboxes1 = np.expand_dims(bboxes1, 1) xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0]) yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1]) xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2]) yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3]) w = np.maximum(0., xx2 - xx1) h = np.maximum(0., yy2 - yy1) wh = w * h iou = wh / ((bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1]) + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) - wh) xxc1 = np.minimum(bboxes1[..., 0], bboxes2[..., 0]) yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1]) xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2]) yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3]) wc = xxc2 - xxc1 hc = yyc2 - yyc1 assert((wc > 0).all() and (hc > 0).all()) area_enclose = wc * hc giou = iou - (area_enclose - wh) / area_enclose giou = (giou + 1.)/2.0 # resize from (-1,1) to (0,1) return giou def diou_batch(bboxes1, bboxes2): """ :param bbox_p: predict of bbox(N,4)(x1,y1,x2,y2) :param bbox_g: groundtruth of bbox(N,4)(x1,y1,x2,y2) :return: """ # for details should go to https://arxiv.org/pdf/1902.09630.pdf # 
ensure predict's bbox form bboxes2 = np.expand_dims(bboxes2, 0) bboxes1 = np.expand_dims(bboxes1, 1) # calculate the intersection box xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0]) yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1]) xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2]) yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3]) w = np.maximum(0., xx2 - xx1) h = np.maximum(0., yy2 - yy1) wh = w * h iou = wh / ((bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1]) + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) - wh) centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0 centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0 centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0 centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0 inner_diag = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2 xxc1 = np.minimum(bboxes1[..., 0], bboxes2[..., 0]) yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1]) xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2]) yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3]) outer_diag = (xxc2 - xxc1) ** 2 + (yyc2 - yyc1) ** 2 diou = iou - inner_diag / outer_diag return (diou + 1) / 2.0 # resize from (-1,1) to (0,1) def ciou_batch(bboxes1, bboxes2): """ :param bbox_p: predict of bbox(N,4)(x1,y1,x2,y2) :param bbox_g: groundtruth of bbox(N,4)(x1,y1,x2,y2) :return: """ # for details should go to https://arxiv.org/pdf/1902.09630.pdf # ensure predict's bbox form bboxes2 = np.expand_dims(bboxes2, 0) bboxes1 = np.expand_dims(bboxes1, 1) # calculate the intersection box xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0]) yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1]) xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2]) yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3]) w = np.maximum(0., xx2 - xx1) h = np.maximum(0., yy2 - yy1) wh = w * h iou = wh / ((bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1]) + (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1]) - wh) centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0 centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0 centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0 centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0 inner_diag = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2 xxc1 = np.minimum(bboxes1[..., 0], bboxes2[..., 0]) yyc1 = np.minimum(bboxes1[..., 1], bboxes2[..., 1]) xxc2 = np.maximum(bboxes1[..., 2], bboxes2[..., 2]) yyc2 = np.maximum(bboxes1[..., 3], bboxes2[..., 3]) outer_diag = (xxc2 - xxc1) ** 2 + (yyc2 - yyc1) ** 2 w1 = bboxes1[..., 2] - bboxes1[..., 0] h1 = bboxes1[..., 3] - bboxes1[..., 1] w2 = bboxes2[..., 2] - bboxes2[..., 0] h2 = bboxes2[..., 3] - bboxes2[..., 1] # prevent dividing over zero. add one pixel shift h2 = h2 + 1. h1 = h1 + 1. arctan = np.arctan(w2/h2) - np.arctan(w1/h1) v = (4 / (np.pi ** 2)) * (arctan ** 2) S = 1 - iou alpha = v / (S+v) ciou = iou - inner_diag / outer_diag - alpha * v return (ciou + 1) / 2.0 # resize from (-1,1) to (0,1) def ct_dist(bboxes1, bboxes2): """ Measure the center distance between two sets of bounding boxes, this is a coarse implementation, we don't recommend using it only for association, which can be unstable and sensitive to frame rate and object speed. 
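A toy illustration of the output scaling (values are illustrative):

        a = np.array([[0., 0., 10., 10.]])
        b = np.array([[0., 0., 10., 10.], [100., 100., 110., 110.]])
        ct_dist(a, b)  # -> array([[1., 0.]]); nearest center scores 1, farthest 0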
""" bboxes2 = np.expand_dims(bboxes2, 0) bboxes1 = np.expand_dims(bboxes1, 1) centerx1 = (bboxes1[..., 0] + bboxes1[..., 2]) / 2.0 centery1 = (bboxes1[..., 1] + bboxes1[..., 3]) / 2.0 centerx2 = (bboxes2[..., 0] + bboxes2[..., 2]) / 2.0 centery2 = (bboxes2[..., 1] + bboxes2[..., 3]) / 2.0 ct_dist2 = (centerx1 - centerx2) ** 2 + (centery1 - centery2) ** 2 ct_dist = np.sqrt(ct_dist2) # The linear rescaling is a naive version and needs more study ct_dist = ct_dist / ct_dist.max() return ct_dist.max() - ct_dist # resize to (0,1) def speed_direction_batch(dets, tracks): tracks = tracks[..., np.newaxis] CX1, CY1 = (dets[:,0] + dets[:,2])/2.0, (dets[:,1]+dets[:,3])/2.0 CX2, CY2 = (tracks[:,0] + tracks[:,2]) /2.0, (tracks[:,1]+tracks[:,3])/2.0 dx = CX1 - CX2 dy = CY1 - CY2 norm = np.sqrt(dx**2 + dy**2) + 1e-6 dx = dx / norm dy = dy / norm return dy, dx # size: num_track x num_det def linear_assignment(cost_matrix): try: import lap _, x, y = lap.lapjv(cost_matrix, extend_cost=True) return np.array([[y[i],i] for i in x if i >= 0]) # except ImportError: from scipy.optimize import linear_sum_assignment x, y = linear_sum_assignment(cost_matrix) return np.array(list(zip(x, y))) def associate_detections_to_trackers(detections,trackers, iou_threshold = 0.3): """ Assigns detections to tracked object (both represented as bounding boxes) Returns 3 lists of matches, unmatched_detections and unmatched_trackers """ if(len(trackers)==0): return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int) iou_matrix = iou_batch(detections, trackers) if min(iou_matrix.shape) > 0: a = (iou_matrix > iou_threshold).astype(np.int32) if a.sum(1).max() == 1 and a.sum(0).max() == 1: matched_indices = np.stack(np.where(a), axis=1) else: matched_indices = linear_assignment(-iou_matrix) else: matched_indices = np.empty(shape=(0,2)) unmatched_detections = [] for d, det in enumerate(detections): if(d not in matched_indices[:,0]): unmatched_detections.append(d) unmatched_trackers = [] for t, trk in enumerate(trackers): if(t not in matched_indices[:,1]): unmatched_trackers.append(t) #filter out matched with low IOU matches = [] for m in matched_indices: if(iou_matrix[m[0], m[1]] 0: a = (iou_matrix > iou_threshold).astype(np.int32) if a.sum(1).max() == 1 and a.sum(0).max() == 1: matched_indices = np.stack(np.where(a), axis=1) else: matched_indices = linear_assignment(-(iou_matrix+angle_diff_cost)) else: matched_indices = np.empty(shape=(0,2)) unmatched_detections = [] for d, det in enumerate(detections): if(d not in matched_indices[:,0]): unmatched_detections.append(d) unmatched_trackers = [] for t, trk in enumerate(trackers): if(t not in matched_indices[:,1]): unmatched_trackers.append(t) # filter out matched with low IOU matches = [] for m in matched_indices: if(iou_matrix[m[0], m[1]] 0: a = (iou_matrix > iou_threshold).astype(np.int32) if a.sum(1).max() == 1 and a.sum(0).max() == 1: matched_indices = np.stack(np.where(a), axis=1) else: matched_indices = linear_assignment(cost_matrix) else: matched_indices = np.empty(shape=(0,2)) unmatched_detections = [] for d, det in enumerate(detections): if(d not in matched_indices[:,0]): unmatched_detections.append(d) unmatched_trackers = [] for t, trk in enumerate(trackers): if(t not in matched_indices[:,1]): unmatched_trackers.append(t) #filter out matched with low IOU matches = [] for m in matched_indices: if(iou_matrix[m[0], m[1]]update cycle. 
The predict step, implemented with the method or function predict(), uses the state transition matrix F to predict the state in the next time period (epoch). The state is stored as a gaussian (x, P), where x is the state (column) vector, and P is its covariance. Covariance matrix Q specifies the process covariance. In Bayesian terms, this prediction is called the *prior*, which you can think of colloquially as the estimate prior to incorporating the measurement.

The update step, implemented with the method or function `update()`, incorporates the measurement z with covariance R into the state estimate (x, P). The class stores the system uncertainty in S, the innovation (residual between prediction and measurement in measurement space) in y, and the Kalman gain in K. The procedural form returns these variables to you. In Bayesian terms this computes the *posterior* - the estimate after the information from the measurement is incorporated.

Whether you use the OO form or procedural form is up to you. If matrices such as H, R, and F are changing each epoch, you'll probably opt to use the procedural form. If they are unchanging, the OO form is perhaps easier to use since you won't need to keep track of these matrices. This is especially useful if you are implementing banks of filters or comparing various KF designs for performance; a trivial coding bug could lead to using the wrong sets of matrices.

This module also offers an implementation of the RTS smoother, and other helper functions, such as log likelihood computations. The Saver class allows you to easily save the state of the KalmanFilter class after every update.

This module expects NumPy arrays for all values that expect arrays, although in a few cases, particularly method parameters, it will accept types that convert to NumPy arrays, such as lists of lists. These exceptions are documented in the method or function.

Examples
--------

The following example constructs a constant velocity kinematic filter, filters noisy data, and plots the results. It also demonstrates using the Saver class to save the state of the filter at each epoch.

.. code-block:: Python

    import matplotlib.pyplot as plt
    import numpy as np
    from numpy.random import randn
    from filterpy.kalman import KalmanFilter
    from filterpy.common import Q_discrete_white_noise, Saver

    dt = 1.0
    r_std, q_std = 2., 0.003
    cv = KalmanFilter(dim_x=2, dim_z=1)
    cv.x = np.array([[0.], [1.]])  # position, velocity
    cv.F = np.array([[1., dt], [0., 1.]])
    cv.R = np.array([[r_std**2]])
    cv.H = np.array([[1., 0.]])
    cv.P = np.diag([.1**2, .03**2])
    cv.Q = Q_discrete_white_noise(2, dt, q_std**2)

    saver = Saver(cv)
    for z in range(100):
        cv.predict()
        cv.update([z + randn() * r_std])
        saver.save()  # save the filter's state

    saver.to_array()
    plt.plot(saver.x[:, 0])

    # plot all of the priors
    plt.plot(saver.x_prior[:, 0])

    # plot mahalanobis distance
    plt.figure()
    plt.plot(saver.mahalanobis)

This code implements the same filter using the procedural form:

    x = np.array([[0.], [1.]])  # position, velocity
    F = np.array([[1., dt], [0., 1.]])
    R = np.array([[r_std**2]])
    H = np.array([[1., 0.]])
    P = np.diag([.1**2, .03**2])
    Q = Q_discrete_white_noise(2, dt, q_std**2)

    xs = []
    for z in range(100):
        x, P = predict(x, P, F=F, Q=Q)
        x, P = update(x, P, z=[z + randn() * r_std], R=R, H=H)
        xs.append(x[0, 0])
    plt.plot(xs)

For more examples see the test subdirectory, or refer to the book cited below. In it I both teach Kalman filtering from basic principles, and teach the use of this library in great detail.

FilterPy library.
http://github.com/rlabbe/filterpy Documentation at: https://filterpy.readthedocs.org Supporting book at: https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python This is licensed under an MIT license. See the readme.MD file for more information. Copyright 2014-2018 Roger R Labbe Jr. """ from __future__ import absolute_import, division from copy import deepcopy from math import log, exp, sqrt import sys import numpy as np from numpy import dot, zeros, eye, isscalar, shape import numpy.linalg as linalg from filterpy.stats import logpdf from filterpy.common import pretty_str, reshape_z class KalmanFilterNew(object): """ Implements a Kalman filter. You are responsible for setting the various state variables to reasonable values; the defaults will not give you a functional filter. For now the best documentation is my free book Kalman and Bayesian Filters in Python [2]_. The test files in this directory also give you a basic idea of use, albeit without much description. In brief, you will first construct this object, specifying the size of the state vector with dim_x and the size of the measurement vector that you will be using with dim_z. These are mostly used to perform size checks when you assign values to the various matrices. For example, if you specified dim_z=2 and then try to assign a 3x3 matrix to R (the measurement noise matrix you will get an assert exception because R should be 2x2. (If for whatever reason you need to alter the size of things midstream just use the underscore version of the matrices to assign directly: your_filter._R = a_3x3_matrix.) After construction the filter will have default matrices created for you, but you must specify the values for each. It’s usually easiest to just overwrite them rather than assign to each element yourself. This will be clearer in the example below. All are of type numpy.array. Examples -------- Here is a filter that tracks position and velocity using a sensor that only reads position. First construct the object with the required dimensionality. Here the state (`dim_x`) has 2 coefficients (position and velocity), and the measurement (`dim_z`) has one. In FilterPy `x` is the state, `z` is the measurement. .. code:: from filterpy.kalman import KalmanFilter f = KalmanFilter (dim_x=2, dim_z=1) Assign the initial value for the state (position and velocity). You can do this with a two dimensional array like so: .. code:: f.x = np.array([[2.], # position [0.]]) # velocity or just use a one dimensional array, which I prefer doing. .. code:: f.x = np.array([2., 0.]) Define the state transition matrix: .. code:: f.F = np.array([[1.,1.], [0.,1.]]) Define the measurement function. Here we need to convert a position-velocity vector into just a position vector, so we use: .. code:: f.H = np.array([[1., 0.]]) Define the state's covariance matrix P. .. code:: f.P = np.array([[1000., 0.], [ 0., 1000.] ]) Now assign the measurement noise. Here the dimension is 1x1, so I can use a scalar .. code:: f.R = 5 I could have done this instead: .. code:: f.R = np.array([[5.]]) Note that this must be a 2 dimensional array. Finally, I will assign the process noise. Here I will take advantage of another FilterPy library function: .. code:: from filterpy.common import Q_discrete_white_noise f.Q = Q_discrete_white_noise(dim=2, dt=0.1, var=0.13) Now just perform the standard predict/update loop: .. 
code:: while some_condition_is_true: z = get_sensor_reading() f.predict() f.update(z) do_something_with_estimate(f.x) **Procedural Form** This module also contains stand alone functions to perform Kalman filtering. Use these if you are not a fan of objects. **Example** .. code:: while True: z, R = read_sensor() x, P = predict(x, P, F, Q) x, P = update(x, P, z, R, H) See my book Kalman and Bayesian Filters in Python [2]_. You will have to set the following attributes after constructing this object for the filter to perform properly. Please note that there are various checks in place to ensure that you have made everything the 'correct' size. However, it is possible to provide incorrectly sized arrays such that the linear algebra can not perform an operation. It can also fail silently - you can end up with matrices of a size that allows the linear algebra to work, but are the wrong shape for the problem you are trying to solve. Parameters ---------- dim_x : int Number of state variables for the Kalman filter. For example, if you are tracking the position and velocity of an object in two dimensions, dim_x would be 4. This is used to set the default size of P, Q, and u. dim_z : int Number of measurement inputs. For example, if the sensor provides you with position in (x,y), dim_z would be 2. dim_u : int (optional) size of the control input, if it is being used. Default value of 0 indicates it is not used. compute_log_likelihood : bool (default = True) Computes log likelihood by default, but this can be a slow computation, so if you never use it you can turn this computation off. Attributes ---------- x : numpy.array(dim_x, 1) Current state estimate. Any call to update() or predict() updates this variable. P : numpy.array(dim_x, dim_x) Current state covariance matrix. Any call to update() or predict() updates this variable. x_prior : numpy.array(dim_x, 1) Prior (predicted) state estimate. The *_prior and *_post attributes are for convenience; they store the prior and posterior of the current epoch. Read Only. P_prior : numpy.array(dim_x, dim_x) Prior (predicted) state covariance matrix. Read Only. x_post : numpy.array(dim_x, 1) Posterior (updated) state estimate. Read Only. P_post : numpy.array(dim_x, dim_x) Posterior (updated) state covariance matrix. Read Only. z : numpy.array Last measurement used in update(). Read only. R : numpy.array(dim_z, dim_z) Measurement noise covariance matrix. Also known as the observation covariance. Q : numpy.array(dim_x, dim_x) Process noise covariance matrix. Also known as the transition covariance. F : numpy.array() State Transition matrix. Also known as `A` in some formulations. H : numpy.array(dim_z, dim_x) Measurement function. Also known as the observation matrix, or as `C`. y : numpy.array Residual of the update step. Read only. K : numpy.array(dim_x, dim_z) Kalman gain of the update step. Read only. S : numpy.array System uncertainty (P projected to measurement space). Read only. SI : numpy.array Inverse system uncertainty. Read only. log_likelihood : float log-likelihood of the last measurement. Read only. likelihood : float likelihood of last measurement. Read only. Computed from the log-likelihood. The log-likelihood can be very small, meaning a large negative value such as -28000. Taking the exp() of that results in 0.0, which can break typical algorithms which multiply by this value, so by default we always return a number >= sys.float_info.min. mahalanobis : float mahalanobis distance of the innovation. Read only.
inv : function, default numpy.linalg.inv If you prefer another inverse function, such as the Moore-Penrose pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv This is only used to invert self.S. If you know it is diagonal, you might choose to set it to filterpy.common.inv_diagonal, which is several times faster than numpy.linalg.inv for diagonal matrices. alpha : float Fading memory setting. 1.0 gives the normal Kalman filter, and values slightly larger than 1.0 (such as 1.02) give a fading memory effect - previous measurements have less influence on the filter's estimates. This formulation of the Fading memory filter (there are many) is due to Dan Simon [1]_. References ---------- .. [1] Dan Simon. "Optimal State Estimation." John Wiley & Sons. p. 208-212. (2006) .. [2] Roger Labbe. "Kalman and Bayesian Filters in Python" https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python """ def __init__(self, dim_x, dim_z, dim_u=0): if dim_x < 1: raise ValueError('dim_x must be 1 or greater') if dim_z < 1: raise ValueError('dim_z must be 1 or greater') if dim_u < 0: raise ValueError('dim_u must be 0 or greater') self.dim_x = dim_x self.dim_z = dim_z self.dim_u = dim_u self.x = zeros((dim_x, 1)) # state self.P = eye(dim_x) # uncertainty covariance self.Q = eye(dim_x) # process uncertainty self.B = None # control transition matrix self.F = eye(dim_x) # state transition matrix self.H = zeros((dim_z, dim_x)) # measurement function self.R = eye(dim_z) # measurement uncertainty self._alpha_sq = 1. # fading memory control self.M = np.zeros((dim_x, dim_z)) # process-measurement cross correlation self.z = np.array([[None]*self.dim_z]).T # gain and residual are computed during the innovation step. We save # them in case you want to inspect them for various purposes self.K = np.zeros((dim_x, dim_z)) # kalman gain self.y = zeros((dim_z, 1)) self.S = np.zeros((dim_z, dim_z)) # system uncertainty self.SI = np.zeros((dim_z, dim_z)) # inverse system uncertainty # identity matrix. Do not alter this. self._I = np.eye(dim_x) # these will always be a copy of x,P after predict() is called self.x_prior = self.x.copy() self.P_prior = self.P.copy() # these will always be a copy of x,P after update() is called self.x_post = self.x.copy() self.P_post = self.P.copy() # only computed if requested via property self._log_likelihood = log(sys.float_info.min) self._likelihood = sys.float_info.min self._mahalanobis = None # keep all observations self.history_obs = [] self.inv = np.linalg.inv self.attr_saved = None self.observed = False def predict(self, u=None, B=None, F=None, Q=None): """ Predict next state (prior) using the Kalman filter state propagation equations. Parameters ---------- u : np.array, default 0 Optional control vector. B : np.array(dim_x, dim_u), or None Optional control transition matrix; a value of None will cause the filter to use `self.B`. F : np.array(dim_x, dim_x), or None Optional state transition matrix; a value of None will cause the filter to use `self.F`. Q : np.array(dim_x, dim_x), scalar, or None Optional process noise matrix; a value of None will cause the filter to use `self.Q`.
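Examples
--------

A minimal sketch of overriding F and Q for one epoch of a 2-state filter `kf` (the dt value here is an arbitrary assumption for illustration):

.. code-block:: Python

    dt = 0.5
    kf.predict(F=np.array([[1., dt], [0., 1.]]),
               Q=Q_discrete_white_noise(2, dt, 0.003**2))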
""" if B is None: B = self.B if F is None: F = self.F if Q is None: Q = self.Q elif isscalar(Q): Q = eye(self.dim_x) * Q # x = Fx + Bu if B is not None and u is not None: self.x = dot(F, self.x) + dot(B, u) else: self.x = dot(F, self.x) # P = FPF' + Q self.P = self._alpha_sq * dot(dot(F, self.P), F.T) + Q # save prior self.x_prior = self.x.copy() self.P_prior = self.P.copy() def freeze(self): """ Save the parameters before non-observation forward """ self.attr_saved = deepcopy(self.__dict__) def unfreeze(self): if self.attr_saved is not None: new_history = deepcopy(self.history_obs) self.__dict__ = self.attr_saved # self.history_obs = new_history self.history_obs = self.history_obs[:-1] occur = [int(d is None) for d in new_history] indices = np.where(np.array(occur)==0)[0] index1 = indices[-2] index2 = indices[-1] box1 = new_history[index1] x1, y1, s1, r1 = box1 w1 = np.sqrt(s1 * r1) h1 = np.sqrt(s1 / r1) box2 = new_history[index2] x2, y2, s2, r2 = box2 w2 = np.sqrt(s2 * r2) h2 = np.sqrt(s2 / r2) time_gap = index2 - index1 dx = (x2-x1)/time_gap dy = (y2-y1)/time_gap dw = (w2-w1)/time_gap dh = (h2-h1)/time_gap for i in range(index2 - index1): """ The default virtual trajectory generation is by linear motion (constant speed hypothesis), you could modify this part to implement your own. """ x = x1 + (i+1) * dx y = y1 + (i+1) * dy w = w1 + (i+1) * dw h = h1 + (i+1) * dh s = w * h r = w / float(h) new_box = np.array([x, y, s, r]).reshape((4, 1)) """ I still use predict-update loop here to refresh the parameters, but this can be faster by directly modifying the internal parameters as suggested in the paper. I keep this naive but slow way for easy read and understanding """ self.update(new_box) if not i == (index2-index1-1): self.predict() def update(self, z, R=None, H=None): """ Add a new measurement (z) to the Kalman filter. If z is None, nothing is computed. However, x_post and P_post are updated with the prior (x_prior, P_prior), and self.z is set to None. Parameters ---------- z : (dim_z, 1): array_like measurement for this update. z can be a scalar if dim_z is 1, otherwise it must be convertible to a column vector. If you pass in a value of H, z must be a column vector the of the correct size. R : np.array, scalar, or None Optionally provide R to override the measurement noise for this one call, otherwise self.R will be used. H : np.array, or None Optionally provide H to override the measurement function for this one call, otherwise self.H will be used. """ # set to None to force recompute self._log_likelihood = None self._likelihood = None self._mahalanobis = None # append the observation self.history_obs.append(z) if z is None: if self.observed: """ Got no observation so freeze the current parameters for future potential online smoothing. 
""" self.freeze() self.observed = False self.z = np.array([[None]*self.dim_z]).T self.x_post = self.x.copy() self.P_post = self.P.copy() self.y = zeros((self.dim_z, 1)) return # self.observed = True if not self.observed: """ Get observation, use online smoothing to re-update parameters """ self.unfreeze() self.observed = True if R is None: R = self.R elif isscalar(R): R = eye(self.dim_z) * R if H is None: z = reshape_z(z, self.dim_z, self.x.ndim) H = self.H # y = z - Hx # error (residual) between measurement and prediction self.y = z - dot(H, self.x) # common subexpression for speed PHT = dot(self.P, H.T) # S = HPH' + R # project system uncertainty into measurement space self.S = dot(H, PHT) + R self.SI = self.inv(self.S) # K = PH'inv(S) # map system uncertainty into kalman gain self.K = dot(PHT, self.SI) # x = x + Ky # predict new x with residual scaled by the kalman gain self.x = self.x + dot(self.K, self.y) # P = (I-KH)P(I-KH)' + KRK' # This is more numerically stable # and works for non-optimal K vs the equation # P = (I-KH)P usually seen in the literature. I_KH = self._I - dot(self.K, H) self.P = dot(dot(I_KH, self.P), I_KH.T) + dot(dot(self.K, R), self.K.T) # save measurement and posterior state self.z = deepcopy(z) self.x_post = self.x.copy() self.P_post = self.P.copy() def predict_steadystate(self, u=0, B=None): """ Predict state (prior) using the Kalman filter state propagation equations. Only x is updated, P is left unchanged. See update_steadstate() for a longer explanation of when to use this method. Parameters ---------- u : np.array Optional control vector. If non-zero, it is multiplied by B to create the control input into the system. B : np.array(dim_x, dim_u), or None Optional control transition matrix; a value of None will cause the filter to use `self.B`. """ if B is None: B = self.B # x = Fx + Bu if B is not None: self.x = dot(self.F, self.x) + dot(B, u) else: self.x = dot(self.F, self.x) # save prior self.x_prior = self.x.copy() self.P_prior = self.P.copy() def update_steadystate(self, z): """ Add a new measurement (z) to the Kalman filter without recomputing the Kalman gain K, the state covariance P, or the system uncertainty S. You can use this for LTI systems since the Kalman gain and covariance converge to a fixed value. Precompute these and assign them explicitly, or run the Kalman filter using the normal predict()/update(0 cycle until they converge. The main advantage of this call is speed. We do significantly less computation, notably avoiding a costly matrix inversion. Use in conjunction with predict_steadystate(), otherwise P will grow without bound. Parameters ---------- z : (dim_z, 1): array_like measurement for this update. z can be a scalar if dim_z is 1, otherwise it must be convertible to a column vector. 
Examples -------- >>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter >>> # let filter converge on representative data, then save K and P >>> for i in range(100): >>> cv.predict() >>> cv.update([i, i, i]) >>> saved_K = np.copy(cv.K) >>> saved_P = np.copy(cv.P) later on: >>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter >>> cv.K = np.copy(saved_K) >>> cv.P = np.copy(saved_P) >>> for i in range(100): >>> cv.predict_steadystate() >>> cv.update_steadystate([i, i, i]) """ # set to None to force recompute self._log_likelihood = None self._likelihood = None self._mahalanobis = None if z is None: self.z = np.array([[None]*self.dim_z]).T self.x_post = self.x.copy() self.P_post = self.P.copy() self.y = zeros((self.dim_z, 1)) return z = reshape_z(z, self.dim_z, self.x.ndim) # y = z - Hx # error (residual) between measurement and prediction self.y = z - dot(self.H, self.x) # x = x + Ky # predict new x with residual scaled by the kalman gain self.x = self.x + dot(self.K, self.y) self.z = deepcopy(z) self.x_post = self.x.copy() self.P_post = self.P.copy() # set to None to force recompute self._log_likelihood = None self._likelihood = None self._mahalanobis = None def update_correlated(self, z, R=None, H=None): """ Add a new measurement (z) to the Kalman filter assuming that process noise and measurement noise are correlated as defined in the `self.M` matrix. A partial derivation can be found in [1]_. If z is None, nothing is changed. Parameters ---------- z : (dim_z, 1): array_like measurement for this update. z can be a scalar if dim_z is 1, otherwise it must be convertible to a column vector. R : np.array, scalar, or None Optionally provide R to override the measurement noise for this one call, otherwise self.R will be used. H : np.array, or None Optionally provide H to override the measurement function for this one call, otherwise self.H will be used. References ---------- .. [1] Bulut, Y. (2011). Applied Kalman filter theory (Doctoral dissertation, Northeastern University). http://people.duke.edu/~hpgavin/SystemID/References/Balut-KalmanFilter-PhD-NEU-2011.pdf """ # set to None to force recompute self._log_likelihood = None self._likelihood = None self._mahalanobis = None if z is None: self.z = np.array([[None]*self.dim_z]).T self.x_post = self.x.copy() self.P_post = self.P.copy() self.y = zeros((self.dim_z, 1)) return if R is None: R = self.R elif isscalar(R): R = eye(self.dim_z) * R # rename for readability and a tiny extra bit of speed if H is None: z = reshape_z(z, self.dim_z, self.x.ndim) H = self.H # handle special case: if z is in form [[z]] but x is not a column # vector dimensions will not match if self.x.ndim == 1 and shape(z) == (1, 1): z = z[0] if shape(z) == (): # is it scalar, e.g.
z=3 or z=np.array(3) z = np.asarray([z]) # y = z - Hx # error (residual) between measurement and prediction self.y = z - dot(H, self.x) # common subexpression for speed PHT = dot(self.P, H.T) # project system uncertainty into measurement space self.S = dot(H, PHT) + dot(H, self.M) + dot(self.M.T, H.T) + R self.SI = self.inv(self.S) # K = PH'inv(S) # map system uncertainty into kalman gain self.K = dot(PHT + self.M, self.SI) # x = x + Ky # predict new x with residual scaled by the kalman gain self.x = self.x + dot(self.K, self.y) self.P = self.P - dot(self.K, dot(H, self.P) + self.M.T) self.z = deepcopy(z) self.x_post = self.x.copy() self.P_post = self.P.copy() def batch_filter(self, zs, Fs=None, Qs=None, Hs=None, Rs=None, Bs=None, us=None, update_first=False, saver=None): """ Batch processes a sequence of measurements. Parameters ---------- zs : list-like list of measurements at each time step `self.dt`. Missing measurements must be represented by `None`. Fs : None, list-like, default=None optional value or list of values to use for the state transition matrix F. If Fs is None then self.F is used for all epochs. Otherwise it must contain a list-like list of F's, one for each epoch. This allows you to have varying F per epoch. Qs : None, np.array or list-like, default=None optional value or list of values to use for the process error covariance Q. If Qs is None then self.Q is used for all epochs. Otherwise it must contain a list-like list of Q's, one for each epoch. This allows you to have varying Q per epoch. Hs : None, np.array or list-like, default=None optional list of values to use for the measurement matrix H. If Hs is None then self.H is used for all epochs. If Hs contains a single matrix, then it is used as H for all epochs. Otherwise it must contain a list-like list of H's, one for each epoch. This allows you to have varying H per epoch. Rs : None, np.array or list-like, default=None optional list of values to use for the measurement error covariance R. If Rs is None then self.R is used for all epochs. Otherwise it must contain a list-like list of R's, one for each epoch. This allows you to have varying R per epoch. Bs : None, np.array or list-like, default=None optional list of values to use for the control transition matrix B. If Bs is None then self.B is used for all epochs. Otherwise it must contain a list-like list of B's, one for each epoch. This allows you to have varying B per epoch. us : None, np.array or list-like, default=None optional list of values to use for the control input vector; If us is None then None is used for all epochs (equivalent to 0, or no control input). Otherwise it must contain a list-like list of u's, one for each epoch. update_first : bool, optional, default=False controls whether the order of operations is update followed by predict, or predict followed by update. Default is predict->update. saver : filterpy.common.Saver, optional filterpy.common.Saver object. If provided, saver.save() will be called after every epoch Returns ------- means : np.array((n,dim_x,1)) array of the state for each time step after the update. Each entry is an np.array. In other words `means[k,:]` is the state at step `k`. covariance : np.array((n,dim_x,dim_x)) array of the covariances for each time step after the update. In other words `covariance[k,:,:]` is the covariance at step `k`. means_predictions : np.array((n,dim_x,1)) array of the state for each time step after the predictions. Each entry is an np.array. In other words `means[k,:]` is the state at step `k`.
covariance_predictions : np.array((n,dim_x,dim_x)) array of the covariances for each time step after the prediction. In other words `covariance[k,:,:]` is the covariance at step `k`. Examples -------- .. code-block:: Python # this example demonstrates tracking a measurement where the time # between measurements varies, as stored in dts. This requires # that F be recomputed for each epoch. The output is then smoothed # with an RTS smoother. zs = [t + random.randn()*4 for t in range (40)] Fs = [np.array([[1., dt], [0, 1]]) for dt in dts] (mu, cov, _, _) = kf.batch_filter(zs, Fs=Fs) (xs, Ps, Ks, Pps) = kf.rts_smoother(mu, cov, Fs=Fs) """ #pylint: disable=too-many-statements n = np.size(zs, 0) if Fs is None: Fs = [self.F] * n if Qs is None: Qs = [self.Q] * n if Hs is None: Hs = [self.H] * n if Rs is None: Rs = [self.R] * n if Bs is None: Bs = [self.B] * n if us is None: us = [0] * n # mean estimates from Kalman Filter if self.x.ndim == 1: means = zeros((n, self.dim_x)) means_p = zeros((n, self.dim_x)) else: means = zeros((n, self.dim_x, 1)) means_p = zeros((n, self.dim_x, 1)) # state covariances from Kalman Filter covariances = zeros((n, self.dim_x, self.dim_x)) covariances_p = zeros((n, self.dim_x, self.dim_x)) if update_first: for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)): self.update(z, R=R, H=H) means[i, :] = self.x covariances[i, :, :] = self.P self.predict(u=u, B=B, F=F, Q=Q) means_p[i, :] = self.x covariances_p[i, :, :] = self.P if saver is not None: saver.save() else: for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)): self.predict(u=u, B=B, F=F, Q=Q) means_p[i, :] = self.x covariances_p[i, :, :] = self.P self.update(z, R=R, H=H) means[i, :] = self.x covariances[i, :, :] = self.P if saver is not None: saver.save() return (means, covariances, means_p, covariances_p) def rts_smoother(self, Xs, Ps, Fs=None, Qs=None, inv=np.linalg.inv): """ Runs the Rauch-Tung-Striebel Kalman smoother on a set of means and covariances computed by a Kalman filter. The usual input would come from the output of `KalmanFilter.batch_filter()`. Parameters ---------- Xs : numpy.array array of the means (state variable x) of the output of a Kalman filter. Ps : numpy.array array of the covariances of the output of a kalman filter. Fs : list-like collection of numpy.array, optional State transition matrix of the Kalman filter at each time step. Optional, if not provided the filter's self.F will be used Qs : list-like collection of numpy.array, optional Process noise of the Kalman filter at each time step. Optional, if not provided the filter's self.Q will be used inv : function, default numpy.linalg.inv If you prefer another inverse function, such as the Moore-Penrose pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv Returns ------- x : numpy.ndarray smoothed means P : numpy.ndarray smoothed state covariances K : numpy.ndarray smoother gain at each step Pp : numpy.ndarray Predicted state covariances Examples -------- ..
code-block:: Python zs = [t + random.randn()*4 for t in range (40)] (mu, cov, _, _) = kalman.batch_filter(zs) (x, P, K, Pp) = rts_smoother(mu, cov, kf.F, kf.Q) """ if len(Xs) != len(Ps): raise ValueError('length of Xs and Ps must be the same') n = Xs.shape[0] dim_x = Xs.shape[1] if Fs is None: Fs = [self.F] * n if Qs is None: Qs = [self.Q] * n # smoother gain K = zeros((n, dim_x, dim_x)) x, P, Pp = Xs.copy(), Ps.copy(), Ps.copy() for k in range(n-2, -1, -1): Pp[k] = dot(dot(Fs[k+1], P[k]), Fs[k+1].T) + Qs[k+1] #pylint: disable=bad-whitespace K[k] = dot(dot(P[k], Fs[k+1].T), inv(Pp[k])) x[k] += dot(K[k], x[k+1] - dot(Fs[k+1], x[k])) P[k] += dot(dot(K[k], P[k+1] - Pp[k]), K[k].T) return (x, P, K, Pp) def get_prediction(self, u=None, B=None, F=None, Q=None): """ Predict next state (prior) using the Kalman filter state propagation equations and returns it without modifying the object. Parameters ---------- u : np.array, default 0 Optional control vector. B : np.array(dim_x, dim_u), or None Optional control transition matrix; a value of None will cause the filter to use `self.B`. F : np.array(dim_x, dim_x), or None Optional state transition matrix; a value of None will cause the filter to use `self.F`. Q : np.array(dim_x, dim_x), scalar, or None Optional process noise matrix; a value of None will cause the filter to use `self.Q`. Returns ------- (x, P) : tuple State vector and covariance array of the prediction. """ if B is None: B = self.B if F is None: F = self.F if Q is None: Q = self.Q elif isscalar(Q): Q = eye(self.dim_x) * Q # x = Fx + Bu if B is not None and u is not None: x = dot(F, self.x) + dot(B, u) else: x = dot(F, self.x) # P = FPF' + Q P = self._alpha_sq * dot(dot(F, self.P), F.T) + Q return x, P def get_update(self, z=None): """ Computes the new estimate based on measurement `z` and returns it without altering the state of the filter. Parameters ---------- z : (dim_z, 1): array_like measurement for this update. z can be a scalar if dim_z is 1, otherwise it must be convertible to a column vector. Returns ------- (x, P) : tuple State vector and covariance array of the update. """ if z is None: return self.x, self.P z = reshape_z(z, self.dim_z, self.x.ndim) R = self.R H = self.H P = self.P x = self.x # error (residual) between measurement and prediction y = z - dot(H, x) # common subexpression for speed PHT = dot(P, H.T) # project system uncertainty into measurement space S = dot(H, PHT) + R # map system uncertainty into kalman gain K = dot(PHT, self.inv(S)) # predict new x with residual scaled by the kalman gain x = x + dot(K, y) # P = (I-KH)P(I-KH)' + KRK' I_KH = self._I - dot(K, H) P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T) return x, P def residual_of(self, z): """ Returns the residual for the given measurement (z). Does not alter the state of the filter. """ z = reshape_z(z, self.dim_z, self.x.ndim) return z - dot(self.H, self.x_prior) def measurement_of_state(self, x): """ Helper function that converts a state into a measurement. Parameters ---------- x : np.array kalman state vector Returns ------- z : (dim_z, 1): array_like measurement for this update. z can be a scalar if dim_z is 1, otherwise it must be convertible to a column vector. """ return dot(self.H, x) @property def log_likelihood(self): """ log-likelihood of the last measurement. """ if self._log_likelihood is None: self._log_likelihood = logpdf(x=self.y, cov=self.S) return self._log_likelihood @property def likelihood(self): """ Computed from the log-likelihood. 
The log-likelihood can be very small, meaning a large negative value such as -28000. Taking the exp() of that results in 0.0, which can break typical algorithms which multiply by this value, so by default we always return a number >= sys.float_info.min. """ if self._likelihood is None: self._likelihood = exp(self.log_likelihood) if self._likelihood == 0: self._likelihood = sys.float_info.min return self._likelihood @property def mahalanobis(self): """ Mahalanobis distance of measurement. E.g. 3 means measurement was 3 standard deviations away from the predicted value. Returns ------- mahalanobis : float """ if self._mahalanobis is None: self._mahalanobis = sqrt(float(dot(dot(self.y.T, self.SI), self.y))) return self._mahalanobis @property def alpha(self): """ Fading memory setting. 1.0 gives the normal Kalman filter, and values slightly larger than 1.0 (such as 1.02) give a fading memory effect - previous measurements have less influence on the filter's estimates. This formulation of the Fading memory filter (there are many) is due to Dan Simon [1]_. """ return self._alpha_sq**.5 def log_likelihood_of(self, z): """ log likelihood of the measurement `z`. This should only be called after a call to update(). Calling after predict() will yield an incorrect result.""" if z is None: return log(sys.float_info.min) return logpdf(z, dot(self.H, self.x), self.S) @alpha.setter def alpha(self, value): if not np.isscalar(value) or value < 1: raise ValueError('alpha must be a float greater than 1') self._alpha_sq = value**2 def __repr__(self): return '\n'.join([ 'KalmanFilter object', pretty_str('dim_x', self.dim_x), pretty_str('dim_z', self.dim_z), pretty_str('dim_u', self.dim_u), pretty_str('x', self.x), pretty_str('P', self.P), pretty_str('x_prior', self.x_prior), pretty_str('P_prior', self.P_prior), pretty_str('x_post', self.x_post), pretty_str('P_post', self.P_post), pretty_str('F', self.F), pretty_str('Q', self.Q), pretty_str('R', self.R), pretty_str('H', self.H), pretty_str('K', self.K), pretty_str('y', self.y), pretty_str('S', self.S), pretty_str('SI', self.SI), pretty_str('M', self.M), pretty_str('B', self.B), pretty_str('z', self.z), pretty_str('log-likelihood', self.log_likelihood), pretty_str('likelihood', self.likelihood), pretty_str('mahalanobis', self.mahalanobis), pretty_str('alpha', self.alpha), pretty_str('inv', self.inv) ]) def test_matrix_dimensions(self, z=None, H=None, R=None, F=None, Q=None): """ Performs a series of asserts to check that the size of everything is what it should be. This can help you debug problems in your design. If you pass in H, R, F, Q those will be used instead of this object's value for those matrices. Testing `z` (the measurement) is problematic. x is a vector, and can be implemented as either a 1D array or as a nx1 column vector. Thus Hx can be of different shapes. Then, if Hx is a single value, it can be either a 1D array or 2D vector. If either is true, z can reasonably be a scalar (either '3' or np.array('3') are scalars under this definition), a 1D, 1 element array, or a 2D, 1 element array. You are allowed to pass in any combination that works.
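A minimal sketch (values chosen purely for illustration):

.. code-block:: Python

    f = KalmanFilter(dim_x=2, dim_z=1)
    f.H = np.array([[1., 0.]])
    f.test_matrix_dimensions()      # vet the default matrices
    f.test_matrix_dimensions(z=3.)  # also vet a scalar measurement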
""" if H is None: H = self.H if R is None: R = self.R if F is None: F = self.F if Q is None: Q = self.Q x = self.x P = self.P assert x.ndim == 1 or x.ndim == 2, \ "x must have one or two dimensions, but has {}".format(x.ndim) if x.ndim == 1: assert x.shape[0] == self.dim_x, \ "Shape of x must be ({},{}), but is {}".format( self.dim_x, 1, x.shape) else: assert x.shape == (self.dim_x, 1), \ "Shape of x must be ({},{}), but is {}".format( self.dim_x, 1, x.shape) assert P.shape == (self.dim_x, self.dim_x), \ "Shape of P must be ({},{}), but is {}".format( self.dim_x, self.dim_x, P.shape) assert Q.shape == (self.dim_x, self.dim_x), \ "Shape of Q must be ({},{}), but is {}".format( self.dim_x, self.dim_x, P.shape) assert F.shape == (self.dim_x, self.dim_x), \ "Shape of F must be ({},{}), but is {}".format( self.dim_x, self.dim_x, F.shape) assert np.ndim(H) == 2, \ "Shape of H must be (dim_z, {}), but is {}".format( P.shape[0], shape(H)) assert H.shape[1] == P.shape[0], \ "Shape of H must be (dim_z, {}), but is {}".format( P.shape[0], H.shape) # shape of R must be the same as HPH' hph_shape = (H.shape[0], H.shape[0]) r_shape = shape(R) if H.shape[0] == 1: # r can be scalar, 1D, or 2D in this case assert r_shape in [(), (1,), (1, 1)], \ "R must be scalar or one element array, but is shaped {}".format( r_shape) else: assert r_shape == hph_shape, \ "shape of R should be {} but it is {}".format(hph_shape, r_shape) if z is not None: z_shape = shape(z) else: z_shape = (self.dim_z, 1) # H@x must have shape of z Hx = dot(H, x) if z_shape == (): # scalar or np.array(scalar) assert Hx.ndim == 1 or shape(Hx) == (1, 1), \ "shape of z should be {}, not {} for the given H".format( shape(Hx), z_shape) elif shape(Hx) == (1,): assert z_shape[0] == 1, 'Shape of z must be {} for the given H'.format(shape(Hx)) else: assert (z_shape == shape(Hx) or (len(z_shape) == 1 and shape(Hx) == (z_shape[0], 1))), \ "shape of z should be {}, not {} for the given H".format( shape(Hx), z_shape) if np.ndim(Hx) > 1 and shape(Hx) != (1, 1): assert shape(Hx) == z_shape, \ 'shape of z should be {} for the given H, but it is {}'.format( shape(Hx), z_shape) def update(x, P, z, R, H=None, return_all=False): """ Add a new measurement (z) to the Kalman filter. If z is None, nothing is changed. This can handle either the multidimensional or unidimensional case. If all parameters are floats instead of arrays the filter will still work, and return floats for x, P as the result. update(1, 2, 1, 1, 1) # univariate update(x, P, 1 Parameters ---------- x : numpy.array(dim_x, 1), or float State estimate vector P : numpy.array(dim_x, dim_x), or float Covariance matrix z : (dim_z, 1): array_like measurement for this update. z can be a scalar if dim_z is 1, otherwise it must be convertible to a column vector. R : numpy.array(dim_z, dim_z), or float Measurement noise matrix H : numpy.array(dim_x, dim_x), or float, optional Measurement function. If not provided, a value of 1 is assumed. return_all : bool, default False If true, y, K, S, and log_likelihood are returned, otherwise only x and P are returned. Returns ------- x : numpy.array Posterior state estimate vector P : numpy.array Posterior covariance matrix y : numpy.array or scalar Residua. 
Difference between measurement and state in measurement space K : numpy.array Kalman gain S : numpy.array System uncertainty in measurement space log_likelihood : float log likelihood of the measurement """ #pylint: disable=bare-except if z is None: if return_all: return x, P, None, None, None, None return x, P if H is None: H = np.array([1]) if np.isscalar(H): H = np.array([H]) Hx = np.atleast_1d(dot(H, x)) z = reshape_z(z, Hx.shape[0], x.ndim) # error (residual) between measurement and prediction y = z - Hx # project system uncertainty into measurement space S = dot(dot(H, P), H.T) + R # map system uncertainty into kalman gain try: K = dot(dot(P, H.T), linalg.inv(S)) except: # can't invert a 1D array, annoyingly K = dot(dot(P, H.T), 1./S) # predict new x with residual scaled by the kalman gain x = x + dot(K, y) # P = (I-KH)P(I-KH)' + KRK' KH = dot(K, H) try: I_KH = np.eye(KH.shape[0]) - KH except: I_KH = np.array([1 - KH]) P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T) if return_all: # compute log likelihood log_likelihood = logpdf(z, dot(H, x), S) return x, P, y, K, S, log_likelihood return x, P def update_steadystate(x, z, K, H=None): """ Add a new measurement (z) to the Kalman filter. If z is None, nothing is changed. Parameters ---------- x : numpy.array(dim_x, 1), or float State estimate vector z : (dim_z, 1): array_like measurement for this update. z can be a scalar if dim_z is 1, otherwise it must be convertible to a column vector. K : numpy.array, or float Kalman gain matrix H : numpy.array(dim_z, dim_x), or float, optional Measurement function. If not provided, a value of 1 is assumed. Returns ------- x : numpy.array Posterior state estimate vector Examples -------- This can handle either the multidimensional or unidimensional case. If all parameters are floats instead of arrays the filter will still work, and return a float for x as the result. >>> update_steadystate(1, 2, 1) # univariate >>> update_steadystate(x, z, K, H) """ if z is None: return x if H is None: H = np.array([1]) if np.isscalar(H): H = np.array([H]) Hx = np.atleast_1d(dot(H, x)) z = reshape_z(z, Hx.shape[0], x.ndim) # error (residual) between measurement and prediction y = z - Hx # estimate new x with residual scaled by the kalman gain return x + dot(K, y) def predict(x, P, F=1, Q=0, u=0, B=1, alpha=1.): """ Predict next state (prior) using the Kalman filter state propagation equations. Parameters ---------- x : numpy.array State estimate vector P : numpy.array Covariance matrix F : numpy.array() State Transition matrix Q : numpy.array, Optional Process noise matrix u : numpy.array, Optional, default 0. Control vector. If non-zero, it is multiplied by B to create the control input into the system. B : numpy.array, optional, default 1. Control transition matrix. alpha : float, Optional, default=1.0 Fading memory setting. 1.0 gives the normal Kalman filter, and values slightly larger than 1.0 (such as 1.02) give a fading memory effect - previous measurements have less influence on the filter's estimates. This formulation of the Fading memory filter (there are many) is due to Dan Simon. Returns ------- x : numpy.array Prior state estimate vector P : numpy.array Prior covariance matrix """ if np.isscalar(F): F = np.array(F) x = dot(F, x) + dot(B, u) P = (alpha * alpha) * dot(dot(F, P), F.T) + Q return x, P
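# A minimal sketch (illustrative) of the all-floats path described in the
# docstrings above; predict() returns NumPy scalars, which update() then
# accepts:
#
#     x, P = predict(x=0., P=500., F=1., Q=0.1)
#     x, P = update(x, P, z=1., R=5.)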
def predict_steadystate(x, F=1, u=0, B=1): """ Predict next state (prior) using the Kalman filter state propagation equations. This steady state form only computes x, assuming that the covariance is constant. Parameters ---------- x : numpy.array State estimate vector F : numpy.array() State Transition matrix u : numpy.array, Optional, default 0. Control vector. If non-zero, it is multiplied by B to create the control input into the system. B : numpy.array, optional, default 1. Control transition matrix. Returns ------- x : numpy.array Prior state estimate vector """ if np.isscalar(F): F = np.array(F) x = dot(F, x) + dot(B, u) return x def batch_filter(x, P, zs, Fs, Qs, Hs, Rs, Bs=None, us=None, update_first=False, saver=None): """ Batch processes a sequence of measurements. Parameters ---------- zs : list-like list of measurements at each time step. Missing measurements must be represented by None. Fs : list-like list of values to use for the state transition matrix. Qs : list-like list of values to use for the process error covariance. Hs : list-like list of values to use for the measurement matrix. Rs : list-like list of values to use for the measurement error covariance. Bs : list-like, optional list of values to use for the control transition matrix; if Bs is None then 0 is used for each epoch. us : list-like, optional list of values to use for the control input vector; if us is None then 0 is used for each epoch. update_first : bool, optional controls whether the order of operations is update followed by predict, or predict followed by update. Default is predict->update. saver : filterpy.common.Saver, optional filterpy.common.Saver object. If provided, saver.save() will be called after every epoch Returns ------- means : np.array((n,dim_x,1)) array of the state for each time step after the update. Each entry is an np.array. In other words `means[k,:]` is the state at step `k`. covariance : np.array((n,dim_x,dim_x)) array of the covariances for each time step after the update. In other words `covariance[k,:,:]` is the covariance at step `k`. means_predictions : np.array((n,dim_x,1)) array of the state for each time step after the predictions. Each entry is an np.array. In other words `means[k,:]` is the state at step `k`. covariance_predictions : np.array((n,dim_x,dim_x)) array of the covariances for each time step after the prediction. In other words `covariance[k,:,:]` is the covariance at step `k`. Examples -------- .. code-block:: Python zs = [t + random.randn()*4 for t in range (40)] Fs = [kf.F for t in range (40)] Hs = [kf.H for t in range (40)] (mu, cov, _, _) = kf.batch_filter(zs, Rs=R_list, Fs=Fs, Hs=Hs, Qs=None, Bs=None, us=None, update_first=False) (xs, Ps, Ks, Pps) = kf.rts_smoother(mu, cov, Fs=Fs, Qs=None) """ n = np.size(zs, 0) dim_x = x.shape[0] # mean estimates from Kalman Filter if x.ndim == 1: means = zeros((n, dim_x)) means_p = zeros((n, dim_x)) else: means = zeros((n, dim_x, 1)) means_p = zeros((n, dim_x, 1)) # state covariances from Kalman Filter covariances = zeros((n, dim_x, dim_x)) covariances_p = zeros((n, dim_x, dim_x)) if us is None: us = [0.] * n if Bs is None: Bs = [0.] * n
if update_first: for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)): x, P = update(x, P, z, R=R, H=H) means[i, :] = x covariances[i, :, :] = P x, P = predict(x, P, u=u, B=B, F=F, Q=Q) means_p[i, :] = x covariances_p[i, :, :] = P if saver is not None: saver.save() else: for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)): x, P = predict(x, P, u=u, B=B, F=F, Q=Q) means_p[i, :] = x covariances_p[i, :, :] = P x, P = update(x, P, z, R=R, H=H) means[i, :] = x covariances[i, :, :] = P if saver is not None: saver.save() return (means, covariances, means_p, covariances_p) def rts_smoother(Xs, Ps, Fs, Qs): """ Runs the Rauch-Tung-Striebel Kalman smoother on a set of means and covariances computed by a Kalman filter. The usual input would come from the output of `KalmanFilter.batch_filter()`. Parameters ---------- Xs : numpy.array array of the means (state variable x) of the output of a Kalman filter. Ps : numpy.array array of the covariances of the output of a kalman filter. Fs : list-like collection of numpy.array State transition matrix of the Kalman filter at each time step. Qs : list-like collection of numpy.array, optional Process noise of the Kalman filter at each time step. Returns ------- x : numpy.ndarray smoothed means P : numpy.ndarray smoothed state covariances K : numpy.ndarray smoother gain at each step pP : numpy.ndarray predicted state covariances Examples -------- .. code-block:: Python zs = [t + random.randn()*4 for t in range (40)] (mu, cov, _, _) = kalman.batch_filter(zs) (x, P, K, pP) = rts_smoother(mu, cov, kf.F, kf.Q) """ if len(Xs) != len(Ps): raise ValueError('length of Xs and Ps must be the same') n = Xs.shape[0] dim_x = Xs.shape[1] # smoother gain K = zeros((n, dim_x, dim_x)) x, P, pP = Xs.copy(), Ps.copy(), Ps.copy() for k in range(n-2, -1, -1): pP[k] = dot(dot(Fs[k], P[k]), Fs[k].T) + Qs[k] #pylint: disable=bad-whitespace K[k] = dot(dot(P[k], Fs[k].T), linalg.inv(pP[k])) x[k] += dot(K[k], x[k+1] - dot(Fs[k], x[k])) P[k] += dot(dot(K[k], P[k+1] - pP[k]), K[k].T) return (x, P, K, pP) ================================================ FILE: DLTA_AI_app/trackers/ocsort/ocsort.py ================================================ """ This script is adapted from the SORT script by Alex Bewley alex@bewley.ai """ from __future__ import print_function import numpy as np from .association import * from ultralytics.yolo.utils.ops import xywh2xyxy def k_previous_obs(observations, cur_age, k): if len(observations) == 0: return [-1, -1, -1, -1, -1] for i in range(k): dt = k - i if cur_age - dt in observations: return observations[cur_age-dt] max_age = max(observations.keys()) return observations[max_age] def convert_bbox_to_z(bbox): """ Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is the aspect ratio """ w = bbox[2] - bbox[0] h = bbox[3] - bbox[1] x = bbox[0] + w/2. y = bbox[1] + h/2.
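# illustrative worked example: bbox [0, 0, 4, 2] gives w = 4, h = 2 and
# centre (2, 1), so below s = w*h = 8 (area) and r = w/h ~= 2 (aspect ratio)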
s = w * h # scale is just area r = w / float(h+1e-6) return np.array([x, y, s, r]).reshape((4, 1)) def convert_x_to_bbox(x, score=None): """ Takes a bounding box in the centre form [x,y,s,r] and returns it in the form [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right """ w = np.sqrt(x[2] * x[3]) h = x[2] / w if(score is None): return np.array([x[0]-w/2., x[1]-h/2., x[0]+w/2., x[1]+h/2.]).reshape((1, 4)) else: return np.array([x[0]-w/2., x[1]-h/2., x[0]+w/2., x[1]+h/2., score]).reshape((1, 5)) def speed_direction(bbox1, bbox2): cx1, cy1 = (bbox1[0]+bbox1[2]) / 2.0, (bbox1[1]+bbox1[3])/2.0 cx2, cy2 = (bbox2[0]+bbox2[2]) / 2.0, (bbox2[1]+bbox2[3])/2.0 speed = np.array([cy2-cy1, cx2-cx1]) norm = np.sqrt((cy2-cy1)**2 + (cx2-cx1)**2) + 1e-6 return speed / norm class KalmanBoxTracker(object): """ This class represents the internal state of individual tracked objects observed as bbox. """ count = 0 def __init__(self, bbox, cls, delta_t=3, orig=False): """ Initialises a tracker using initial bounding box. """ # define constant velocity model if not orig: from .kalmanfilter import KalmanFilterNew as KalmanFilter self.kf = KalmanFilter(dim_x=7, dim_z=4) else: from filterpy.kalman import KalmanFilter self.kf = KalmanFilter(dim_x=7, dim_z=4) self.kf.F = np.array([[1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 0, 0, 1], [ 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1]]) self.kf.H = np.array([[1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0]]) self.kf.R[2:, 2:] *= 10. self.kf.P[4:, 4:] *= 1000. # give high uncertainty to the unobservable initial velocities self.kf.P *= 10. self.kf.Q[-1, -1] *= 0.01 self.kf.Q[4:, 4:] *= 0.01 self.kf.x[:4] = convert_bbox_to_z(bbox) self.time_since_update = 0 self.id = KalmanBoxTracker.count KalmanBoxTracker.count += 1 self.history = [] self.hits = 0 self.hit_streak = 0 self.age = 0 self.conf = bbox[-1] self.cls = cls """ NOTE: [-1,-1,-1,-1,-1] is a compromise placeholder for non-observation status, the same as the return of function k_previous_obs. It is ugly and I do not like it. But to support generating the observation array in a fast and unified way, as you will see below in k_observations = np.array([k_previous_obs(...)]), let's bear it for now. """ self.last_observation = np.array([-1, -1, -1, -1, -1]) # placeholder self.observations = dict() self.history_observations = [] self.velocity = None self.delta_t = delta_t def update(self, bbox, cls): """ Updates the state vector with observed bbox. """ if bbox is not None: self.conf = bbox[-1] self.cls = cls if self.last_observation.sum() >= 0: # a previous observation exists previous_box = None for i in range(self.delta_t): dt = self.delta_t - i if self.age - dt in self.observations: previous_box = self.observations[self.age-dt] break if previous_box is None: previous_box = self.last_observation """ Estimate the track speed direction with observations \Delta t steps away """ self.velocity = speed_direction(previous_box, bbox) """ Insert new observations. This is an ugly way to maintain both self.observations and self.history_observations. Bear it for the moment. """ self.last_observation = bbox self.observations[self.age] = bbox self.history_observations.append(bbox) self.time_since_update = 0 self.history = [] self.hits += 1 self.hit_streak += 1 self.kf.update(convert_bbox_to_z(bbox)) else: self.kf.update(bbox) def predict(self): """ Advances the state vector and returns the predicted bounding box estimate.
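If the predicted area would go non-positive (self.kf.x[2] plus its velocity self.kf.x[6] <= 0), the area velocity is first zeroed out; see the guard at the top of the method body.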
""" if((self.kf.x[6]+self.kf.x[2]) <= 0): self.kf.x[6] *= 0.0 self.kf.predict() self.age += 1 if(self.time_since_update > 0): self.hit_streak = 0 self.time_since_update += 1 self.history.append(convert_x_to_bbox(self.kf.x)) return self.history[-1] def get_state(self): """ Returns the current bounding box estimate. """ return convert_x_to_bbox(self.kf.x) """ We support multiple ways for association cost calculation, by default we use IoU. GIoU may have better performance in some situations. We note that we hardly normalize the cost by all methods to (0,1) which may not be the best practice. """ ASSO_FUNCS = { "iou": iou_batch, "giou": giou_batch, "ciou": ciou_batch, "diou": diou_batch, "ct_dist": ct_dist} class OCSort(object): def __init__(self, det_thresh, max_age=30, min_hits=3, iou_threshold=0.3, delta_t=3, asso_func="iou", inertia=0.2, use_byte=False): """ Sets key parameters for SORT """ self.max_age = max_age self.min_hits = min_hits self.iou_threshold = iou_threshold self.trackers = [] self.frame_count = 0 self.det_thresh = det_thresh self.delta_t = delta_t self.asso_func = ASSO_FUNCS[asso_func] self.inertia = inertia self.use_byte = use_byte KalmanBoxTracker.count = 0 def update(self, dets, _): """ Params: dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...] Requires: this method must be called once for each frame even with empty detections (use np.empty((0, 5)) for frames without detections). Returns the a similar array, where the last column is the object ID. NOTE: The number of objects returned may differ from the number of detections provided. """ self.frame_count += 1 xyxys = dets[:, 0:4] confs = dets[:, 4] clss = dets[:, 5] classes = clss.numpy() xyxys = xyxys.numpy() confs = confs.numpy() output_results = np.column_stack((xyxys, confs, classes)) inds_low = confs > 0.1 inds_high = confs < self.det_thresh inds_second = np.logical_and(inds_low, inds_high) # self.det_thresh > score > 0.1, for second matching dets_second = output_results[inds_second] # detections for second matching remain_inds = confs > self.det_thresh dets = output_results[remain_inds] # get predicted locations from existing trackers. trks = np.zeros((len(self.trackers), 5)) to_del = [] ret = [] for t, trk in enumerate(trks): pos = self.trackers[t].predict()[0] trk[:] = [pos[0], pos[1], pos[2], pos[3], 0] if np.any(np.isnan(pos)): to_del.append(t) trks = np.ma.compress_rows(np.ma.masked_invalid(trks)) for t in reversed(to_del): self.trackers.pop(t) velocities = np.array( [trk.velocity if trk.velocity is not None else np.array((0, 0)) for trk in self.trackers]) last_boxes = np.array([trk.last_observation for trk in self.trackers]) k_observations = np.array( [k_previous_obs(trk.observations, trk.age, self.delta_t) for trk in self.trackers]) """ First round of association """ matched, unmatched_dets, unmatched_trks = associate( dets, trks, self.iou_threshold, velocities, k_observations, self.inertia) for m in matched: self.trackers[m[1]].update(dets[m[0], :5], dets[m[0], 5]) """ Second round of associaton by OCR """ # BYTE association if self.use_byte and len(dets_second) > 0 and unmatched_trks.shape[0] > 0: u_trks = trks[unmatched_trks] iou_left = self.asso_func(dets_second, u_trks) # iou between low score detections and unmatched tracks iou_left = np.array(iou_left) if iou_left.max() > self.iou_threshold: """ NOTE: by using a lower threshold, e.g., self.iou_threshold - 0.1, you may get a higher performance especially on MOT17/MOT20 datasets. 
But we keep it uniform here for simplicity """ matched_indices = linear_assignment(-iou_left) to_remove_trk_indices = [] for m in matched_indices: det_ind, trk_ind = m[0], unmatched_trks[m[1]] if iou_left[m[0], m[1]] < self.iou_threshold: continue self.trackers[trk_ind].update(dets_second[det_ind, :5], dets_second[det_ind, 5]) to_remove_trk_indices.append(trk_ind) unmatched_trks = np.setdiff1d(unmatched_trks, np.array(to_remove_trk_indices)) if unmatched_dets.shape[0] > 0 and unmatched_trks.shape[0] > 0: left_dets = dets[unmatched_dets] left_trks = last_boxes[unmatched_trks] iou_left = self.asso_func(left_dets, left_trks) iou_left = np.array(iou_left) if iou_left.max() > self.iou_threshold: """ NOTE: by using a lower threshold, e.g., self.iou_threshold - 0.1, you may get a higher performance especially on MOT17/MOT20 datasets. But we keep it uniform here for simplicity """ rematched_indices = linear_assignment(-iou_left) to_remove_det_indices = [] to_remove_trk_indices = [] for m in rematched_indices: det_ind, trk_ind = unmatched_dets[m[0]], unmatched_trks[m[1]] if iou_left[m[0], m[1]] < self.iou_threshold: continue self.trackers[trk_ind].update(dets[det_ind, :5], dets[det_ind, 5]) to_remove_det_indices.append(det_ind) to_remove_trk_indices.append(trk_ind) unmatched_dets = np.setdiff1d(unmatched_dets, np.array(to_remove_det_indices)) unmatched_trks = np.setdiff1d(unmatched_trks, np.array(to_remove_trk_indices)) for m in unmatched_trks: self.trackers[m].update(None, None) # create and initialise new trackers for unmatched detections for i in unmatched_dets: trk = KalmanBoxTracker(dets[i, :5], dets[i, 5], delta_t=self.delta_t) self.trackers.append(trk) i = len(self.trackers) for trk in reversed(self.trackers): if trk.last_observation.sum() < 0: d = trk.get_state()[0] else: """ using the recent observation instead of the Kalman filter prediction is optional; we didn't notice a significant difference here """ d = trk.last_observation[:4] if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits): # +1 as MOT benchmark requires positive ret.append(np.concatenate((d, [trk.id+1], [trk.cls], [trk.conf])).reshape(1, -1)) i -= 1 # remove dead tracklet if(trk.time_since_update > self.max_age): self.trackers.pop(i) if(len(ret) > 0): return np.concatenate(ret) return np.empty((0, 5)) ================================================ FILE: DLTA_AI_app/trackers/reid_export.py ================================================ import argparse import os # limit the number of cpus used by high performance libraries os.environ["OMP_NUM_THREADS"] = "1" os.environ["OPENBLAS_NUM_THREADS"] = "1" os.environ["MKL_NUM_THREADS"] = "1" os.environ["VECLIB_MAXIMUM_THREADS"] = "1" os.environ["NUMEXPR_NUM_THREADS"] = "1" import sys import numpy as np from pathlib import Path import torch import time import platform import pandas as pd import subprocess import torch.backends.cudnn as cudnn from torch.utils.mobile_optimizer import optimize_for_mobile FILE = Path(__file__).resolve() ROOT = FILE.parents[0].parents[0] # yolov5 strongsort root directory WEIGHTS = ROOT / 'weights' if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH if str(ROOT / 'yolov5') not in sys.path: sys.path.append(str(ROOT / 'yolov5')) # add yolov5 ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative import logging from ultralytics.yolo.utils.torch_utils import select_device from ultralytics.yolo.utils import LOGGER, colorstr, ops from ultralytics.yolo.utils.checks import
check_requirements, check_version from trackers.strongsort.deep.models import build_model from trackers.strongsort.deep.reid_model_factory import get_model_name, load_pretrained_weights def file_size(path): # Return file/dir size (MB) path = Path(path) if path.is_file(): return path.stat().st_size / 1E6 elif path.is_dir(): return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6 else: return 0.0 def export_formats(): # YOLOv5 export formats x = [ ['PyTorch', '-', '.pt', True, True], ['TorchScript', 'torchscript', '.torchscript', True, True], ['ONNX', 'onnx', '.onnx', True, True], ['OpenVINO', 'openvino', '_openvino_model', True, False], ['TensorRT', 'engine', '.engine', False, True], ['TensorFlow Lite', 'tflite', '.tflite', True, False], ] return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): # YOLOv5 TorchScript model export try: LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') f = file.with_suffix('.torchscript') ts = torch.jit.trace(model, im, strict=False) if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html optimize_for_mobile(ts)._save_for_lite_interpreter(str(f)) else: ts.save(str(f)) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') return f except Exception as e: LOGGER.info(f'{prefix} export failure: {e}') def export_onnx(model, im, file, opset, dynamic, fp16, simplify, prefix=colorstr('ONNX:')): # ONNX export try: check_requirements(('onnx',)) import onnx f = file.with_suffix('.onnx') LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') if dynamic: dynamic = {'images': {0: 'batch'}, 'output': {0: 'batch'}} # input --> shape(1,3,640,640), output --> shape(1,25200,85) torch.onnx.export( model.half() if fp16 else model.cpu(), im.half() if fp16 else im.cpu(), f, verbose=False, opset_version=opset, do_constant_folding=True, input_names=['images'], output_names=['output'], dynamic_axes=dynamic or None ) # Checks model_onnx = onnx.load(f) # load onnx model onnx.checker.check_model(model_onnx) # check onnx model onnx.save(model_onnx, f) # Simplify if simplify: try: cuda = torch.cuda.is_available() check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1')) import onnxsim LOGGER.info(f'simplifying with onnx-simplifier {onnxsim.__version__}...') model_onnx, check = onnxsim.simplify(model_onnx) assert check, 'assert check failed' onnx.save(model_onnx, f) except Exception as e: LOGGER.info(f'simplifier failure: {e}') LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') return f except Exception as e: LOGGER.info(f'export failure: {e}') def export_openvino(file, half, prefix=colorstr('OpenVINO:')): # YOLOv5 OpenVINO export check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ import openvino.inference_engine as ie try: LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') f = str(file).replace('.pt', f'_openvino_model{os.sep}') cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" subprocess.check_output(cmd.split()) # export LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') return f except Exception as e: LOGGER.info(f'export failure: {e}')
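# Example invocation (illustrative; see the argument parser at the bottom of
# this file for the full set of flags):
#   python trackers/reid_export.py --weights weights/osnet_x0_25_msmt17.pt \
#       --include torchscript onnx --device cpu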
def export_tflite(file, half, prefix=colorstr('TFLite:')): # YOLOv5 TFLite export try: check_requirements(('openvino2tensorflow', 'tensorflow', 'tensorflow_datasets')) # requires openvino2tensorflow: https://pypi.org/project/openvino2tensorflow/ import openvino.inference_engine as ie LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') output = Path(str(file).replace(f'_openvino_model{os.sep}', f'_tflite_model{os.sep}')) modelxml = list(Path(file).glob('*.xml'))[0] cmd = f"openvino2tensorflow \ --model_path {modelxml} \ --model_output_path {output} \ --output_pb \ --output_saved_model \ --output_no_quant_float32_tflite \ --output_dynamic_range_quant_tflite" subprocess.check_output(cmd.split()) # export LOGGER.info(f'{prefix} export success, results saved in {output} ({file_size(output):.1f} MB)') return output except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt try: assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`' try: import tensorrt as trt except Exception: if platform.system() == 'Linux': check_requirements(('nvidia-tensorrt',), cmds=('-U --index-url https://pypi.ngc.nvidia.com',)) import tensorrt as trt if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 grid = model.model[-1].anchor_grid model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] export_onnx(model, im, file, 12, dynamic, half, simplify) # opset 12 model.model[-1].anchor_grid = grid else: # TensorRT >= 8 check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 export_onnx(model, im, file, 12, dynamic, half, simplify) # opset 12 onnx = file.with_suffix('.onnx') LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') assert onnx.exists(), f'failed to export ONNX file: {onnx}' f = file.with_suffix('.engine') # TensorRT engine file logger = trt.Logger(trt.Logger.INFO) if verbose: logger.min_severity = trt.Logger.Severity.VERBOSE builder = trt.Builder(logger) config = builder.create_builder_config() config.max_workspace_size = workspace * 1 << 30 # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) network = builder.create_network(flag) parser = trt.OnnxParser(network, logger) if not parser.parse_from_file(str(onnx)): raise RuntimeError(f'failed to load ONNX file: {onnx}') inputs = [network.get_input(i) for i in range(network.num_inputs)] outputs = [network.get_output(i) for i in range(network.num_outputs)] LOGGER.info(f'{prefix} Network Description:') for inp in inputs: LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') for out in outputs: LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') if dynamic: if im.shape[0] <= 1: LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument") profile = builder.create_optimization_profile() for inp in inputs: if half: inp.dtype = trt.float16 profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) config.add_optimization_profile(profile) LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}') if builder.platform_has_fast_fp16 and half: config.set_flag(trt.BuilderFlag.FP16) config.default_device_type = trt.DeviceType.GPU
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="ReID export")
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[256, 128], help='image (h, w)')
    parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
    parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes')
    parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
    parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version')
    parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)')
    parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log')
    parser.add_argument('--weights', nargs='+', type=str, default=WEIGHTS / 'osnet_x0_25_msmt17.pt', help='model.pt path(s)')
    parser.add_argument('--half', action='store_true', help='FP16 half-precision export')
    parser.add_argument('--include', nargs='+', default=['torchscript'], help='torchscript, onnx, openvino, engine')
    args = parser.parse_args()

    t = time.time()
    include = [x.lower() for x in args.include]  # to lowercase
    fmts = tuple(export_formats()['Argument'][1:])  # --include arguments
    flags = [x in include for x in fmts]
    assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}'
    jit, onnx, openvino, engine, tflite = flags  # export booleans

    args.device = select_device(args.device)
    if args.half:
        assert args.device.type != 'cpu', '--half only compatible with GPU export, i.e. use --device 0'
        # assert not args.dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both'

    if type(args.weights) is list:
        args.weights = Path(args.weights[0])
    model = build_model(
        get_model_name(args.weights),
        num_classes=1,
        pretrained=not (args.weights and args.weights.is_file() and args.weights.suffix == '.pt'),
        use_gpu=args.device
    ).to(args.device)
    load_pretrained_weights(model, args.weights)
    model.eval()

    if args.optimize:
        assert args.device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. use --device cpu'

    im = torch.zeros(args.batch_size, 3, args.imgsz[0], args.imgsz[1]).to(args.device)  # image size e.g. (1, 3, 256, 128) BCHW
    for _ in range(2):
        y = model(im)  # dry runs
    if args.half:
        im, model = im.half(), model.half()  # to FP16
    shape = tuple((y[0] if isinstance(y, tuple) else y).shape)  # model output shape
    LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {args.weights} with output shape {shape} ({file_size(args.weights):.1f} MB)")

    # Exports
    f = [''] * len(fmts)  # exported filenames
    if jit:
        f[0] = export_torchscript(model, im, args.weights, args.optimize)
    if engine:  # TensorRT runs the ONNX export internally
        f[1] = export_engine(model, im, args.weights, args.half, args.dynamic, args.simplify, args.workspace, args.verbose)
    if onnx:  # OpenVINO requires ONNX
        f[2] = export_onnx(model, im, args.weights, args.opset, args.dynamic, args.half, args.simplify)  # opset 12
    if openvino:
        f[3] = export_openvino(args.weights, args.half)
    if tflite:
        export_tflite(f[3], False)  # the TFLite step consumes the OpenVINO IR directory

    # Finish
    f = [str(x) for x in f if x]  # filter out '' and None
    if any(f):
        LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)'
                    f"\nResults saved to {colorstr('bold', args.weights.parent.resolve())}"
                    f"\nVisualize: https://netron.app")
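For reference, a typical end-to-end run of this entry point looks like the following (illustrative only; the script filename is an assumption, and the weights file must exist locally):

# export TorchScript and ONNX on CPU (hypothetical invocation)
#   python reid_export.py --weights osnet_x0_25_msmt17.pt --include torchscript onnx --imgsz 256 128 --device cpu
# build an FP16 TensorRT engine on GPU 0 (the engine export runs the ONNX export internally)
#   python reid_export.py --weights osnet_x0_25_msmt17.pt --include engine --device 0 --half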
================================================
FILE: DLTA_AI_app/trackers/strongsort/.gitignore
================================================
# Folders
__pycache__/
build/
*.egg-info

# Files
*.weights
*.t7
*.mp4
*.avi
*.so
*.txt


================================================
FILE: DLTA_AI_app/trackers/strongsort/__init__.py
================================================


================================================
FILE: DLTA_AI_app/trackers/strongsort/configs/strongsort.yaml
================================================
strongsort:
  ecc: true
  ema_alpha: 0.8962157769329083
  max_age: 40
  max_dist: 0.1594374041012136
  max_iou_dist: 0.5431835667667874
  max_unmatched_preds: 0
  mc_lambda: 0.995
  n_init: 3
  nn_budget: 100
  conf_thres: 0.5122620708221085


================================================
FILE: DLTA_AI_app/trackers/strongsort/deep/checkpoint/.gitkeep
================================================
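These hyperparameters (track lifetime, appearance-distance gates, EMA smoothing, and so on) are read when the tracker is constructed. A minimal sketch of loading them with PyYAML; the path is assumed relative to the repository root:

import yaml

with open('DLTA_AI_app/trackers/strongsort/configs/strongsort.yaml') as f:
    cfg = yaml.safe_load(f)['strongsort']
print(cfg['max_age'], cfg['n_init'])  # 40 3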
================================================
FILE: DLTA_AI_app/trackers/strongsort/deep/models/__init__.py
================================================
from __future__ import absolute_import
import torch

from .pcb import *
from .mlfn import *
from .hacnn import *
from .osnet import *
from .senet import *
from .mudeep import *
from .nasnet import *
from .resnet import *
from .densenet import *
from .xception import *
from .osnet_ain import *
from .resnetmid import *
from .shufflenet import *
from .squeezenet import *
from .inceptionv4 import *
from .mobilenetv2 import *
from .resnet_ibn_a import *
from .resnet_ibn_b import *
from .shufflenetv2 import *
from .inceptionresnetv2 import *

__model_factory = {
    # image classification models
    'resnet18': resnet18,
    'resnet34': resnet34,
    'resnet50': resnet50,
    'resnet101': resnet101,
    'resnet152': resnet152,
    'resnext50_32x4d': resnext50_32x4d,
    'resnext101_32x8d': resnext101_32x8d,
    'resnet50_fc512': resnet50_fc512,
    'se_resnet50': se_resnet50,
    'se_resnet50_fc512': se_resnet50_fc512,
    'se_resnet101': se_resnet101,
    'se_resnext50_32x4d': se_resnext50_32x4d,
    'se_resnext101_32x4d': se_resnext101_32x4d,
    'densenet121': densenet121,
    'densenet169': densenet169,
    'densenet201': densenet201,
    'densenet161': densenet161,
    'densenet121_fc512': densenet121_fc512,
    'inceptionresnetv2': inceptionresnetv2,
    'inceptionv4': inceptionv4,
    'xception': xception,
    'resnet50_ibn_a': resnet50_ibn_a,
    'resnet50_ibn_b': resnet50_ibn_b,
    # lightweight models
    'nasnsetmobile': nasnetamobile,  # (sic) key spelling follows upstream torchreid
    'mobilenetv2_x1_0': mobilenetv2_x1_0,
    'mobilenetv2_x1_4': mobilenetv2_x1_4,
    'shufflenet': shufflenet,
    'squeezenet1_0': squeezenet1_0,
    'squeezenet1_0_fc512': squeezenet1_0_fc512,
    'squeezenet1_1': squeezenet1_1,
    'shufflenet_v2_x0_5': shufflenet_v2_x0_5,
    'shufflenet_v2_x1_0': shufflenet_v2_x1_0,
    'shufflenet_v2_x1_5': shufflenet_v2_x1_5,
    'shufflenet_v2_x2_0': shufflenet_v2_x2_0,
    # reid-specific models
    'mudeep': MuDeep,
    'resnet50mid': resnet50mid,
    'hacnn': HACNN,
    'pcb_p6': pcb_p6,
    'pcb_p4': pcb_p4,
    'mlfn': mlfn,
    'osnet_x1_0': osnet_x1_0,
    'osnet_x0_75': osnet_x0_75,
    'osnet_x0_5': osnet_x0_5,
    'osnet_x0_25': osnet_x0_25,
    'osnet_ibn_x1_0': osnet_ibn_x1_0,
    'osnet_ain_x1_0': osnet_ain_x1_0,
    'osnet_ain_x0_75': osnet_ain_x0_75,
    'osnet_ain_x0_5': osnet_ain_x0_5,
    'osnet_ain_x0_25': osnet_ain_x0_25
}


def show_avai_models():
    """Displays available models.

    Examples::
        >>> from torchreid import models
        >>> models.show_avai_models()
    """
    print(list(__model_factory.keys()))


def build_model(name, num_classes, loss='softmax', pretrained=True, use_gpu=True):
    """A function wrapper for building a model.

    Args:
        name (str): model name.
        num_classes (int): number of training identities.
        loss (str, optional): loss function to optimize the model. Currently
            supports "softmax" and "triplet". Default is "softmax".
        pretrained (bool, optional): whether to load ImageNet-pretrained weights.
            Default is True.
        use_gpu (bool, optional): whether to use gpu. Default is True.

    Returns:
        nn.Module

    Examples::
        >>> from torchreid import models
        >>> model = models.build_model('resnet50', 751, loss='softmax')
    """
    avai_models = list(__model_factory.keys())
    if name not in avai_models:
        raise KeyError('Unknown model: {}. Must be one of {}'.format(name, avai_models))
    return __model_factory[name](
        num_classes=num_classes,
        loss=loss,
        pretrained=pretrained,
        use_gpu=use_gpu
    )
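In the tracking pipeline this factory is used to build a ReID feature extractor rather than a classifier: in eval mode the models below return embeddings instead of logits. A hedged sketch (the model key comes from the factory above; the input size is the usual person-ReID crop, and pretrained=False avoids any weight download):

import torch
from trackers.strongsort.deep.models import build_model

extractor = build_model('osnet_x0_25', num_classes=1, pretrained=False, use_gpu=False)
extractor.eval()                                     # eval mode: forward() returns features
with torch.no_grad():
    feats = extractor(torch.randn(4, 3, 256, 128))   # 4 person crops -> (4, feat_dim)
print(feats.shape)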
================================================
FILE: DLTA_AI_app/trackers/strongsort/deep/models/densenet.py
================================================
"""
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import re
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils import model_zoo

__all__ = [
    'densenet121', 'densenet169', 'densenet201', 'densenet161',
    'densenet121_fc512'
]

model_urls = {
    'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
    'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
    'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',
    'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',
}


class _DenseLayer(nn.Sequential):

    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
        super(_DenseLayer, self).__init__()
        self.add_module('norm1', nn.BatchNorm2d(num_input_features))
        self.add_module('relu1', nn.ReLU(inplace=True))
        self.add_module(
            'conv1',
            nn.Conv2d(
                num_input_features,
                bn_size * growth_rate,
                kernel_size=1,
                stride=1,
                bias=False
            )
        )
        self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate))
        self.add_module('relu2', nn.ReLU(inplace=True))
        self.add_module(
            'conv2',
            nn.Conv2d(
                bn_size * growth_rate,
                growth_rate,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=False
            )
        )
        self.drop_rate = drop_rate

    def forward(self, x):
        new_features = super(_DenseLayer, self).forward(x)
        if self.drop_rate > 0:
            new_features = F.dropout(
                new_features, p=self.drop_rate, training=self.training
            )
        return torch.cat([x, new_features], 1)


class _DenseBlock(nn.Sequential):

    def __init__(
        self, num_layers, num_input_features, bn_size, growth_rate, drop_rate
    ):
        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            layer = _DenseLayer(
                num_input_features + i * growth_rate, growth_rate, bn_size,
                drop_rate
            )
            self.add_module('denselayer%d' % (i + 1), layer)


class _Transition(nn.Sequential):

    def __init__(self, num_input_features, num_output_features):
        super(_Transition, self).__init__()
        self.add_module('norm', nn.BatchNorm2d(num_input_features))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module(
            'conv',
            nn.Conv2d(
                num_input_features,
                num_output_features,
                kernel_size=1,
                stride=1,
                bias=False
            )
        )
        self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))


class DenseNet(nn.Module):
    """Densely connected network.

    Reference:
        Huang et al. Densely Connected Convolutional Networks. CVPR 2017.

    Public keys:
        - ``densenet121``: DenseNet121.
        - ``densenet169``: DenseNet169.
        - ``densenet201``: DenseNet201.
        - ``densenet161``: DenseNet161.
        - ``densenet121_fc512``: DenseNet121 + FC.
""" def __init__( self, num_classes, loss, growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64, bn_size=4, drop_rate=0, fc_dims=None, dropout_p=None, **kwargs ): super(DenseNet, self).__init__() self.loss = loss # First convolution self.features = nn.Sequential( OrderedDict( [ ( 'conv0', nn.Conv2d( 3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False ) ), ('norm0', nn.BatchNorm2d(num_init_features)), ('relu0', nn.ReLU(inplace=True)), ( 'pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1) ), ] ) ) # Each denseblock num_features = num_init_features for i, num_layers in enumerate(block_config): block = _DenseBlock( num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate ) self.features.add_module('denseblock%d' % (i+1), block) num_features = num_features + num_layers*growth_rate if i != len(block_config) - 1: trans = _Transition( num_input_features=num_features, num_output_features=num_features // 2 ) self.features.add_module('transition%d' % (i+1), trans) num_features = num_features // 2 # Final batch norm self.features.add_module('norm5', nn.BatchNorm2d(num_features)) self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.feature_dim = num_features self.fc = self._construct_fc_layer(fc_dims, num_features, dropout_p) # Linear layer self.classifier = nn.Linear(self.feature_dim, num_classes) self._init_params() def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): """Constructs fully connected layer. Args: fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed input_dim (int): input dimension dropout_p (float): dropout probability, if None, dropout is unused """ if fc_dims is None: self.feature_dim = input_dim return None assert isinstance( fc_dims, (list, tuple) ), 'fc_dims must be either list or tuple, but got {}'.format( type(fc_dims) ) layers = [] for dim in fc_dims: layers.append(nn.Linear(input_dim, dim)) layers.append(nn.BatchNorm1d(dim)) layers.append(nn.ReLU(inplace=True)) if dropout_p is not None: layers.append(nn.Dropout(p=dropout_p)) input_dim = dim self.feature_dim = fc_dims[-1] return nn.Sequential(*layers) def _init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu' ) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self, x): f = self.features(x) f = F.relu(f, inplace=True) v = self.global_avgpool(f) v = v.view(v.size(0), -1) if self.fc is not None: v = self.fc(v) if not self.training: return v y = self.classifier(v) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, v else: raise KeyError('Unsupported loss: {}'.format(self.loss)) def init_pretrained_weights(model, model_url): """Initializes model with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. """ pretrain_dict = model_zoo.load_url(model_url) # '.'s are no longer allowed in module names, but pervious _DenseLayer # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'. # They are also in the checkpoints in model_urls. This pattern is used # to find such keys. 
pattern = re.compile( r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$' ) for key in list(pretrain_dict.keys()): res = pattern.match(key) if res: new_key = res.group(1) + res.group(2) pretrain_dict[new_key] = pretrain_dict[key] del pretrain_dict[key] model_dict = model.state_dict() pretrain_dict = { k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size() } model_dict.update(pretrain_dict) model.load_state_dict(model_dict) """ Dense network configurations: -- densenet121: num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16) densenet169: num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32) densenet201: num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32) densenet161: num_init_features=96, growth_rate=48, block_config=(6, 12, 36, 24) """ def densenet121(num_classes, loss='softmax', pretrained=True, **kwargs): model = DenseNet( num_classes=num_classes, loss=loss, num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), fc_dims=None, dropout_p=None, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['densenet121']) return model def densenet169(num_classes, loss='softmax', pretrained=True, **kwargs): model = DenseNet( num_classes=num_classes, loss=loss, num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32), fc_dims=None, dropout_p=None, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['densenet169']) return model def densenet201(num_classes, loss='softmax', pretrained=True, **kwargs): model = DenseNet( num_classes=num_classes, loss=loss, num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32), fc_dims=None, dropout_p=None, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['densenet201']) return model def densenet161(num_classes, loss='softmax', pretrained=True, **kwargs): model = DenseNet( num_classes=num_classes, loss=loss, num_init_features=96, growth_rate=48, block_config=(6, 12, 36, 24), fc_dims=None, dropout_p=None, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['densenet161']) return model def densenet121_fc512(num_classes, loss='softmax', pretrained=True, **kwargs): model = DenseNet( num_classes=num_classes, loss=loss, num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), fc_dims=[512], dropout_p=None, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['densenet121']) return model ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/hacnn.py ================================================ from __future__ import division, absolute_import import torch from torch import nn from torch.nn import functional as F __all__ = ['HACNN'] class ConvBlock(nn.Module): """Basic convolutional block. convolution + batch normalization + relu. Args: in_c (int): number of input channels. out_c (int): number of output channels. k (int or tuple): kernel size. s (int or tuple): stride. p (int or tuple): padding. 
""" def __init__(self, in_c, out_c, k, s=1, p=0): super(ConvBlock, self).__init__() self.conv = nn.Conv2d(in_c, out_c, k, stride=s, padding=p) self.bn = nn.BatchNorm2d(out_c) def forward(self, x): return F.relu(self.bn(self.conv(x))) class InceptionA(nn.Module): def __init__(self, in_channels, out_channels): super(InceptionA, self).__init__() mid_channels = out_channels // 4 self.stream1 = nn.Sequential( ConvBlock(in_channels, mid_channels, 1), ConvBlock(mid_channels, mid_channels, 3, p=1), ) self.stream2 = nn.Sequential( ConvBlock(in_channels, mid_channels, 1), ConvBlock(mid_channels, mid_channels, 3, p=1), ) self.stream3 = nn.Sequential( ConvBlock(in_channels, mid_channels, 1), ConvBlock(mid_channels, mid_channels, 3, p=1), ) self.stream4 = nn.Sequential( nn.AvgPool2d(3, stride=1, padding=1), ConvBlock(in_channels, mid_channels, 1), ) def forward(self, x): s1 = self.stream1(x) s2 = self.stream2(x) s3 = self.stream3(x) s4 = self.stream4(x) y = torch.cat([s1, s2, s3, s4], dim=1) return y class InceptionB(nn.Module): def __init__(self, in_channels, out_channels): super(InceptionB, self).__init__() mid_channels = out_channels // 4 self.stream1 = nn.Sequential( ConvBlock(in_channels, mid_channels, 1), ConvBlock(mid_channels, mid_channels, 3, s=2, p=1), ) self.stream2 = nn.Sequential( ConvBlock(in_channels, mid_channels, 1), ConvBlock(mid_channels, mid_channels, 3, p=1), ConvBlock(mid_channels, mid_channels, 3, s=2, p=1), ) self.stream3 = nn.Sequential( nn.MaxPool2d(3, stride=2, padding=1), ConvBlock(in_channels, mid_channels * 2, 1), ) def forward(self, x): s1 = self.stream1(x) s2 = self.stream2(x) s3 = self.stream3(x) y = torch.cat([s1, s2, s3], dim=1) return y class SpatialAttn(nn.Module): """Spatial Attention (Sec. 3.1.I.1)""" def __init__(self): super(SpatialAttn, self).__init__() self.conv1 = ConvBlock(1, 1, 3, s=2, p=1) self.conv2 = ConvBlock(1, 1, 1) def forward(self, x): # global cross-channel averaging x = x.mean(1, keepdim=True) # 3-by-3 conv x = self.conv1(x) # bilinear resizing x = F.upsample( x, (x.size(2) * 2, x.size(3) * 2), mode='bilinear', align_corners=True ) # scaling conv x = self.conv2(x) return x class ChannelAttn(nn.Module): """Channel Attention (Sec. 3.1.I.2)""" def __init__(self, in_channels, reduction_rate=16): super(ChannelAttn, self).__init__() assert in_channels % reduction_rate == 0 self.conv1 = ConvBlock(in_channels, in_channels // reduction_rate, 1) self.conv2 = ConvBlock(in_channels // reduction_rate, in_channels, 1) def forward(self, x): # squeeze operation (global average pooling) x = F.avg_pool2d(x, x.size()[2:]) # excitation operation (2 conv layers) x = self.conv1(x) x = self.conv2(x) return x class SoftAttn(nn.Module): """Soft Attention (Sec. 3.1.I) Aim: Spatial Attention + Channel Attention Output: attention maps with shape identical to input. """ def __init__(self, in_channels): super(SoftAttn, self).__init__() self.spatial_attn = SpatialAttn() self.channel_attn = ChannelAttn(in_channels) self.conv = ConvBlock(in_channels, in_channels, 1) def forward(self, x): y_spatial = self.spatial_attn(x) y_channel = self.channel_attn(x) y = y_spatial * y_channel y = torch.sigmoid(self.conv(y)) return y class HardAttn(nn.Module): """Hard Attention (Sec. 
3.1.II)""" def __init__(self, in_channels): super(HardAttn, self).__init__() self.fc = nn.Linear(in_channels, 4 * 2) self.init_params() def init_params(self): self.fc.weight.data.zero_() self.fc.bias.data.copy_( torch.tensor( [0, -0.75, 0, -0.25, 0, 0.25, 0, 0.75], dtype=torch.float ) ) def forward(self, x): # squeeze operation (global average pooling) x = F.avg_pool2d(x, x.size()[2:]).view(x.size(0), x.size(1)) # predict transformation parameters theta = torch.tanh(self.fc(x)) theta = theta.view(-1, 4, 2) return theta class HarmAttn(nn.Module): """Harmonious Attention (Sec. 3.1)""" def __init__(self, in_channels): super(HarmAttn, self).__init__() self.soft_attn = SoftAttn(in_channels) self.hard_attn = HardAttn(in_channels) def forward(self, x): y_soft_attn = self.soft_attn(x) theta = self.hard_attn(x) return y_soft_attn, theta class HACNN(nn.Module): """Harmonious Attention Convolutional Neural Network. Reference: Li et al. Harmonious Attention Network for Person Re-identification. CVPR 2018. Public keys: - ``hacnn``: HACNN. """ # Args: # num_classes (int): number of classes to predict # nchannels (list): number of channels AFTER concatenation # feat_dim (int): feature dimension for a single stream # learn_region (bool): whether to learn region features (i.e. local branch) def __init__( self, num_classes, loss='softmax', nchannels=[128, 256, 384], feat_dim=512, learn_region=True, use_gpu=True, **kwargs ): super(HACNN, self).__init__() self.loss = loss self.learn_region = learn_region self.use_gpu = use_gpu self.conv = ConvBlock(3, 32, 3, s=2, p=1) # Construct Inception + HarmAttn blocks # ============== Block 1 ============== self.inception1 = nn.Sequential( InceptionA(32, nchannels[0]), InceptionB(nchannels[0], nchannels[0]), ) self.ha1 = HarmAttn(nchannels[0]) # ============== Block 2 ============== self.inception2 = nn.Sequential( InceptionA(nchannels[0], nchannels[1]), InceptionB(nchannels[1], nchannels[1]), ) self.ha2 = HarmAttn(nchannels[1]) # ============== Block 3 ============== self.inception3 = nn.Sequential( InceptionA(nchannels[1], nchannels[2]), InceptionB(nchannels[2], nchannels[2]), ) self.ha3 = HarmAttn(nchannels[2]) self.fc_global = nn.Sequential( nn.Linear(nchannels[2], feat_dim), nn.BatchNorm1d(feat_dim), nn.ReLU(), ) self.classifier_global = nn.Linear(feat_dim, num_classes) if self.learn_region: self.init_scale_factors() self.local_conv1 = InceptionB(32, nchannels[0]) self.local_conv2 = InceptionB(nchannels[0], nchannels[1]) self.local_conv3 = InceptionB(nchannels[1], nchannels[2]) self.fc_local = nn.Sequential( nn.Linear(nchannels[2] * 4, feat_dim), nn.BatchNorm1d(feat_dim), nn.ReLU(), ) self.classifier_local = nn.Linear(feat_dim, num_classes) self.feat_dim = feat_dim * 2 else: self.feat_dim = feat_dim def init_scale_factors(self): # initialize scale factors (s_w, s_h) for four regions self.scale_factors = [] self.scale_factors.append( torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float) ) self.scale_factors.append( torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float) ) self.scale_factors.append( torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float) ) self.scale_factors.append( torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float) ) def stn(self, x, theta): """Performs spatial transform x: (batch, channel, height, width) theta: (batch, 2, 3) """ grid = F.affine_grid(theta, x.size()) x = F.grid_sample(x, grid) return x def transform_theta(self, theta_i, region_idx): """Transforms theta to include (s_w, s_h), resulting in (batch, 2, 3)""" scale_factors = 
self.scale_factors[region_idx] theta = torch.zeros(theta_i.size(0), 2, 3) theta[:, :, :2] = scale_factors theta[:, :, -1] = theta_i if self.use_gpu: theta = theta.cuda() return theta def forward(self, x): assert x.size(2) == 160 and x.size(3) == 64, \ 'Input size does not match, expected (160, 64) but got ({}, {})'.format(x.size(2), x.size(3)) x = self.conv(x) # ============== Block 1 ============== # global branch x1 = self.inception1(x) x1_attn, x1_theta = self.ha1(x1) x1_out = x1 * x1_attn # local branch if self.learn_region: x1_local_list = [] for region_idx in range(4): x1_theta_i = x1_theta[:, region_idx, :] x1_theta_i = self.transform_theta(x1_theta_i, region_idx) x1_trans_i = self.stn(x, x1_theta_i) x1_trans_i = F.upsample( x1_trans_i, (24, 28), mode='bilinear', align_corners=True ) x1_local_i = self.local_conv1(x1_trans_i) x1_local_list.append(x1_local_i) # ============== Block 2 ============== # Block 2 # global branch x2 = self.inception2(x1_out) x2_attn, x2_theta = self.ha2(x2) x2_out = x2 * x2_attn # local branch if self.learn_region: x2_local_list = [] for region_idx in range(4): x2_theta_i = x2_theta[:, region_idx, :] x2_theta_i = self.transform_theta(x2_theta_i, region_idx) x2_trans_i = self.stn(x1_out, x2_theta_i) x2_trans_i = F.upsample( x2_trans_i, (12, 14), mode='bilinear', align_corners=True ) x2_local_i = x2_trans_i + x1_local_list[region_idx] x2_local_i = self.local_conv2(x2_local_i) x2_local_list.append(x2_local_i) # ============== Block 3 ============== # Block 3 # global branch x3 = self.inception3(x2_out) x3_attn, x3_theta = self.ha3(x3) x3_out = x3 * x3_attn # local branch if self.learn_region: x3_local_list = [] for region_idx in range(4): x3_theta_i = x3_theta[:, region_idx, :] x3_theta_i = self.transform_theta(x3_theta_i, region_idx) x3_trans_i = self.stn(x2_out, x3_theta_i) x3_trans_i = F.upsample( x3_trans_i, (6, 7), mode='bilinear', align_corners=True ) x3_local_i = x3_trans_i + x2_local_list[region_idx] x3_local_i = self.local_conv3(x3_local_i) x3_local_list.append(x3_local_i) # ============== Feature generation ============== # global branch x_global = F.avg_pool2d(x3_out, x3_out.size()[2:] ).view(x3_out.size(0), x3_out.size(1)) x_global = self.fc_global(x_global) # local branch if self.learn_region: x_local_list = [] for region_idx in range(4): x_local_i = x3_local_list[region_idx] x_local_i = F.avg_pool2d(x_local_i, x_local_i.size()[2:] ).view(x_local_i.size(0), -1) x_local_list.append(x_local_i) x_local = torch.cat(x_local_list, 1) x_local = self.fc_local(x_local) if not self.training: # l2 normalization before concatenation if self.learn_region: x_global = x_global / x_global.norm(p=2, dim=1, keepdim=True) x_local = x_local / x_local.norm(p=2, dim=1, keepdim=True) return torch.cat([x_global, x_local], 1) else: return x_global prelogits_global = self.classifier_global(x_global) if self.learn_region: prelogits_local = self.classifier_local(x_local) if self.loss == 'softmax': if self.learn_region: return (prelogits_global, prelogits_local) else: return prelogits_global elif self.loss == 'triplet': if self.learn_region: return (prelogits_global, prelogits_local), (x_global, x_local) else: return prelogits_global, x_global else: raise KeyError("Unsupported loss: {}".format(self.loss)) ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/inceptionresnetv2.py ================================================ """ Code imported from https://github.com/Cadene/pretrained-models.pytorch """ from __future__ import 
division, absolute_import import torch import torch.nn as nn import torch.utils.model_zoo as model_zoo __all__ = ['inceptionresnetv2'] pretrained_settings = { 'inceptionresnetv2': { 'imagenet': { 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth', 'input_space': 'RGB', 'input_size': [3, 299, 299], 'input_range': [0, 1], 'mean': [0.5, 0.5, 0.5], 'std': [0.5, 0.5, 0.5], 'num_classes': 1000 }, 'imagenet+background': { 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth', 'input_space': 'RGB', 'input_size': [3, 299, 299], 'input_range': [0, 1], 'mean': [0.5, 0.5, 0.5], 'std': [0.5, 0.5, 0.5], 'num_classes': 1001 } } } class BasicConv2d(nn.Module): def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0): super(BasicConv2d, self).__init__() self.conv = nn.Conv2d( in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False ) # verify bias false self.bn = nn.BatchNorm2d( out_planes, eps=0.001, # value found in tensorflow momentum=0.1, # default pytorch value affine=True ) self.relu = nn.ReLU(inplace=False) def forward(self, x): x = self.conv(x) x = self.bn(x) x = self.relu(x) return x class Mixed_5b(nn.Module): def __init__(self): super(Mixed_5b, self).__init__() self.branch0 = BasicConv2d(192, 96, kernel_size=1, stride=1) self.branch1 = nn.Sequential( BasicConv2d(192, 48, kernel_size=1, stride=1), BasicConv2d(48, 64, kernel_size=5, stride=1, padding=2) ) self.branch2 = nn.Sequential( BasicConv2d(192, 64, kernel_size=1, stride=1), BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1), BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1) ) self.branch3 = nn.Sequential( nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), BasicConv2d(192, 64, kernel_size=1, stride=1) ) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class Block35(nn.Module): def __init__(self, scale=1.0): super(Block35, self).__init__() self.scale = scale self.branch0 = BasicConv2d(320, 32, kernel_size=1, stride=1) self.branch1 = nn.Sequential( BasicConv2d(320, 32, kernel_size=1, stride=1), BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1) ) self.branch2 = nn.Sequential( BasicConv2d(320, 32, kernel_size=1, stride=1), BasicConv2d(32, 48, kernel_size=3, stride=1, padding=1), BasicConv2d(48, 64, kernel_size=3, stride=1, padding=1) ) self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1) self.relu = nn.ReLU(inplace=False) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) out = self.conv2d(out) out = out * self.scale + x out = self.relu(out) return out class Mixed_6a(nn.Module): def __init__(self): super(Mixed_6a, self).__init__() self.branch0 = BasicConv2d(320, 384, kernel_size=3, stride=2) self.branch1 = nn.Sequential( BasicConv2d(320, 256, kernel_size=1, stride=1), BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1), BasicConv2d(256, 384, kernel_size=3, stride=2) ) self.branch2 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) return out class Block17(nn.Module): def __init__(self, scale=1.0): super(Block17, self).__init__() self.scale = scale self.branch0 = BasicConv2d(1088, 192, kernel_size=1, stride=1) self.branch1 = nn.Sequential( BasicConv2d(1088, 128, kernel_size=1, stride=1), BasicConv2d( 128, 160, kernel_size=(1, 7), 
stride=1, padding=(0, 3) ), BasicConv2d( 160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0) ) ) self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1) self.relu = nn.ReLU(inplace=False) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) out = torch.cat((x0, x1), 1) out = self.conv2d(out) out = out * self.scale + x out = self.relu(out) return out class Mixed_7a(nn.Module): def __init__(self): super(Mixed_7a, self).__init__() self.branch0 = nn.Sequential( BasicConv2d(1088, 256, kernel_size=1, stride=1), BasicConv2d(256, 384, kernel_size=3, stride=2) ) self.branch1 = nn.Sequential( BasicConv2d(1088, 256, kernel_size=1, stride=1), BasicConv2d(256, 288, kernel_size=3, stride=2) ) self.branch2 = nn.Sequential( BasicConv2d(1088, 256, kernel_size=1, stride=1), BasicConv2d(256, 288, kernel_size=3, stride=1, padding=1), BasicConv2d(288, 320, kernel_size=3, stride=2) ) self.branch3 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class Block8(nn.Module): def __init__(self, scale=1.0, noReLU=False): super(Block8, self).__init__() self.scale = scale self.noReLU = noReLU self.branch0 = BasicConv2d(2080, 192, kernel_size=1, stride=1) self.branch1 = nn.Sequential( BasicConv2d(2080, 192, kernel_size=1, stride=1), BasicConv2d( 192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1) ), BasicConv2d( 224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0) ) ) self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1) if not self.noReLU: self.relu = nn.ReLU(inplace=False) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) out = torch.cat((x0, x1), 1) out = self.conv2d(out) out = out * self.scale + x if not self.noReLU: out = self.relu(out) return out # ---------------- # Model Definition # ---------------- class InceptionResNetV2(nn.Module): """Inception-ResNet-V2. Reference: Szegedy et al. Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning. AAAI 2017. Public keys: - ``inceptionresnetv2``: Inception-ResNet-V2. 
""" def __init__(self, num_classes, loss='softmax', **kwargs): super(InceptionResNetV2, self).__init__() self.loss = loss # Modules self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2) self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1) self.conv2d_2b = BasicConv2d( 32, 64, kernel_size=3, stride=1, padding=1 ) self.maxpool_3a = nn.MaxPool2d(3, stride=2) self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1) self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1) self.maxpool_5a = nn.MaxPool2d(3, stride=2) self.mixed_5b = Mixed_5b() self.repeat = nn.Sequential( Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17), Block35(scale=0.17) ) self.mixed_6a = Mixed_6a() self.repeat_1 = nn.Sequential( Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10), Block17(scale=0.10) ) self.mixed_7a = Mixed_7a() self.repeat_2 = nn.Sequential( Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20), Block8(scale=0.20) ) self.block8 = Block8(noReLU=True) self.conv2d_7b = BasicConv2d(2080, 1536, kernel_size=1, stride=1) self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.classifier = nn.Linear(1536, num_classes) def load_imagenet_weights(self): settings = pretrained_settings['inceptionresnetv2']['imagenet'] pretrain_dict = model_zoo.load_url(settings['url']) model_dict = self.state_dict() pretrain_dict = { k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size() } model_dict.update(pretrain_dict) self.load_state_dict(model_dict) def featuremaps(self, x): x = self.conv2d_1a(x) x = self.conv2d_2a(x) x = self.conv2d_2b(x) x = self.maxpool_3a(x) x = self.conv2d_3b(x) x = self.conv2d_4a(x) x = self.maxpool_5a(x) x = self.mixed_5b(x) x = self.repeat(x) x = self.mixed_6a(x) x = self.repeat_1(x) x = self.mixed_7a(x) x = self.repeat_2(x) x = self.block8(x) x = self.conv2d_7b(x) return x def forward(self, x): f = self.featuremaps(x) v = self.global_avgpool(f) v = v.view(v.size(0), -1) if not self.training: return v y = self.classifier(v) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, v else: raise KeyError('Unsupported loss: {}'.format(self.loss)) def inceptionresnetv2(num_classes, loss='softmax', pretrained=True, **kwargs): model = InceptionResNetV2(num_classes=num_classes, loss=loss, **kwargs) if pretrained: model.load_imagenet_weights() return model ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/inceptionv4.py ================================================ from __future__ import division, absolute_import import torch import torch.nn as nn import torch.utils.model_zoo as model_zoo __all__ = ['inceptionv4'] """ Code imported from https://github.com/Cadene/pretrained-models.pytorch """ pretrained_settings = { 'inceptionv4': { 'imagenet': { 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth', 'input_space': 'RGB', 'input_size': [3, 299, 299], 'input_range': [0, 1], 'mean': 
[0.5, 0.5, 0.5], 'std': [0.5, 0.5, 0.5], 'num_classes': 1000 }, 'imagenet+background': { 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth', 'input_space': 'RGB', 'input_size': [3, 299, 299], 'input_range': [0, 1], 'mean': [0.5, 0.5, 0.5], 'std': [0.5, 0.5, 0.5], 'num_classes': 1001 } } } class BasicConv2d(nn.Module): def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0): super(BasicConv2d, self).__init__() self.conv = nn.Conv2d( in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False ) # verify bias false self.bn = nn.BatchNorm2d( out_planes, eps=0.001, # value found in tensorflow momentum=0.1, # default pytorch value affine=True ) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.bn(x) x = self.relu(x) return x class Mixed_3a(nn.Module): def __init__(self): super(Mixed_3a, self).__init__() self.maxpool = nn.MaxPool2d(3, stride=2) self.conv = BasicConv2d(64, 96, kernel_size=3, stride=2) def forward(self, x): x0 = self.maxpool(x) x1 = self.conv(x) out = torch.cat((x0, x1), 1) return out class Mixed_4a(nn.Module): def __init__(self): super(Mixed_4a, self).__init__() self.branch0 = nn.Sequential( BasicConv2d(160, 64, kernel_size=1, stride=1), BasicConv2d(64, 96, kernel_size=3, stride=1) ) self.branch1 = nn.Sequential( BasicConv2d(160, 64, kernel_size=1, stride=1), BasicConv2d(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)), BasicConv2d(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)), BasicConv2d(64, 96, kernel_size=(3, 3), stride=1) ) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) out = torch.cat((x0, x1), 1) return out class Mixed_5a(nn.Module): def __init__(self): super(Mixed_5a, self).__init__() self.conv = BasicConv2d(192, 192, kernel_size=3, stride=2) self.maxpool = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.conv(x) x1 = self.maxpool(x) out = torch.cat((x0, x1), 1) return out class Inception_A(nn.Module): def __init__(self): super(Inception_A, self).__init__() self.branch0 = BasicConv2d(384, 96, kernel_size=1, stride=1) self.branch1 = nn.Sequential( BasicConv2d(384, 64, kernel_size=1, stride=1), BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1) ) self.branch2 = nn.Sequential( BasicConv2d(384, 64, kernel_size=1, stride=1), BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1), BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1) ) self.branch3 = nn.Sequential( nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), BasicConv2d(384, 96, kernel_size=1, stride=1) ) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class Reduction_A(nn.Module): def __init__(self): super(Reduction_A, self).__init__() self.branch0 = BasicConv2d(384, 384, kernel_size=3, stride=2) self.branch1 = nn.Sequential( BasicConv2d(384, 192, kernel_size=1, stride=1), BasicConv2d(192, 224, kernel_size=3, stride=1, padding=1), BasicConv2d(224, 256, kernel_size=3, stride=2) ) self.branch2 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) return out class Inception_B(nn.Module): def __init__(self): super(Inception_B, self).__init__() self.branch0 = BasicConv2d(1024, 384, kernel_size=1, stride=1) self.branch1 = nn.Sequential( BasicConv2d(1024, 192, kernel_size=1, stride=1), BasicConv2d( 192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3) ), BasicConv2d( 
224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0) ) ) self.branch2 = nn.Sequential( BasicConv2d(1024, 192, kernel_size=1, stride=1), BasicConv2d( 192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0) ), BasicConv2d( 192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3) ), BasicConv2d( 224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0) ), BasicConv2d( 224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3) ) ) self.branch3 = nn.Sequential( nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), BasicConv2d(1024, 128, kernel_size=1, stride=1) ) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class Reduction_B(nn.Module): def __init__(self): super(Reduction_B, self).__init__() self.branch0 = nn.Sequential( BasicConv2d(1024, 192, kernel_size=1, stride=1), BasicConv2d(192, 192, kernel_size=3, stride=2) ) self.branch1 = nn.Sequential( BasicConv2d(1024, 256, kernel_size=1, stride=1), BasicConv2d( 256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3) ), BasicConv2d( 256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0) ), BasicConv2d(320, 320, kernel_size=3, stride=2) ) self.branch2 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) return out class Inception_C(nn.Module): def __init__(self): super(Inception_C, self).__init__() self.branch0 = BasicConv2d(1536, 256, kernel_size=1, stride=1) self.branch1_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1) self.branch1_1a = BasicConv2d( 384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1) ) self.branch1_1b = BasicConv2d( 384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0) ) self.branch2_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1) self.branch2_1 = BasicConv2d( 384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0) ) self.branch2_2 = BasicConv2d( 448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1) ) self.branch2_3a = BasicConv2d( 512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1) ) self.branch2_3b = BasicConv2d( 512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0) ) self.branch3 = nn.Sequential( nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), BasicConv2d(1536, 256, kernel_size=1, stride=1) ) def forward(self, x): x0 = self.branch0(x) x1_0 = self.branch1_0(x) x1_1a = self.branch1_1a(x1_0) x1_1b = self.branch1_1b(x1_0) x1 = torch.cat((x1_1a, x1_1b), 1) x2_0 = self.branch2_0(x) x2_1 = self.branch2_1(x2_0) x2_2 = self.branch2_2(x2_1) x2_3a = self.branch2_3a(x2_2) x2_3b = self.branch2_3b(x2_2) x2 = torch.cat((x2_3a, x2_3b), 1) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class InceptionV4(nn.Module): """Inception-v4. Reference: Szegedy et al. Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning. AAAI 2017. Public keys: - ``inceptionv4``: InceptionV4. 
""" def __init__(self, num_classes, loss, **kwargs): super(InceptionV4, self).__init__() self.loss = loss self.features = nn.Sequential( BasicConv2d(3, 32, kernel_size=3, stride=2), BasicConv2d(32, 32, kernel_size=3, stride=1), BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1), Mixed_3a(), Mixed_4a(), Mixed_5a(), Inception_A(), Inception_A(), Inception_A(), Inception_A(), Reduction_A(), # Mixed_6a Inception_B(), Inception_B(), Inception_B(), Inception_B(), Inception_B(), Inception_B(), Inception_B(), Reduction_B(), # Mixed_7a Inception_C(), Inception_C(), Inception_C() ) self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.classifier = nn.Linear(1536, num_classes) def forward(self, x): f = self.features(x) v = self.global_avgpool(f) v = v.view(v.size(0), -1) if not self.training: return v y = self.classifier(v) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, v else: raise KeyError('Unsupported loss: {}'.format(self.loss)) def init_pretrained_weights(model, model_url): """Initializes model with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. """ pretrain_dict = model_zoo.load_url(model_url) model_dict = model.state_dict() pretrain_dict = { k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size() } model_dict.update(pretrain_dict) model.load_state_dict(model_dict) def inceptionv4(num_classes, loss='softmax', pretrained=True, **kwargs): model = InceptionV4(num_classes, loss, **kwargs) if pretrained: model_url = pretrained_settings['inceptionv4']['imagenet']['url'] init_pretrained_weights(model, model_url) return model ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/mlfn.py ================================================ from __future__ import division, absolute_import import torch import torch.utils.model_zoo as model_zoo from torch import nn from torch.nn import functional as F __all__ = ['mlfn'] model_urls = { # training epoch = 5, top1 = 51.6 'imagenet': 'https://mega.nz/#!YHxAhaxC!yu9E6zWl0x5zscSouTdbZu8gdFFytDdl-RAdD2DEfpk', } class MLFNBlock(nn.Module): def __init__( self, in_channels, out_channels, stride, fsm_channels, groups=32 ): super(MLFNBlock, self).__init__() self.groups = groups mid_channels = out_channels // 2 # Factor Modules self.fm_conv1 = nn.Conv2d(in_channels, mid_channels, 1, bias=False) self.fm_bn1 = nn.BatchNorm2d(mid_channels) self.fm_conv2 = nn.Conv2d( mid_channels, mid_channels, 3, stride=stride, padding=1, bias=False, groups=self.groups ) self.fm_bn2 = nn.BatchNorm2d(mid_channels) self.fm_conv3 = nn.Conv2d(mid_channels, out_channels, 1, bias=False) self.fm_bn3 = nn.BatchNorm2d(out_channels) # Factor Selection Module self.fsm = nn.Sequential( nn.AdaptiveAvgPool2d(1), nn.Conv2d(in_channels, fsm_channels[0], 1), nn.BatchNorm2d(fsm_channels[0]), nn.ReLU(inplace=True), nn.Conv2d(fsm_channels[0], fsm_channels[1], 1), nn.BatchNorm2d(fsm_channels[1]), nn.ReLU(inplace=True), nn.Conv2d(fsm_channels[1], self.groups, 1), nn.BatchNorm2d(self.groups), nn.Sigmoid(), ) self.downsample = None if in_channels != out_channels or stride > 1: self.downsample = nn.Sequential( nn.Conv2d( in_channels, out_channels, 1, stride=stride, bias=False ), nn.BatchNorm2d(out_channels), ) def forward(self, x): residual = x s = self.fsm(x) # reduce dimension x = self.fm_conv1(x) x = self.fm_bn1(x) x = F.relu(x, inplace=True) # group convolution x = self.fm_conv2(x) x = self.fm_bn2(x) x = F.relu(x, inplace=True) # factor 
selection b, c = x.size(0), x.size(1) n = c // self.groups ss = s.repeat(1, n, 1, 1) # from (b, g, 1, 1) to (b, g*n=c, 1, 1) ss = ss.view(b, n, self.groups, 1, 1) ss = ss.permute(0, 2, 1, 3, 4).contiguous() ss = ss.view(b, c, 1, 1) x = ss * x # recover dimension x = self.fm_conv3(x) x = self.fm_bn3(x) x = F.relu(x, inplace=True) if self.downsample is not None: residual = self.downsample(residual) return F.relu(residual + x, inplace=True), s class MLFN(nn.Module): """Multi-Level Factorisation Net. Reference: Chang et al. Multi-Level Factorisation Net for Person Re-Identification. CVPR 2018. Public keys: - ``mlfn``: MLFN (Multi-Level Factorisation Net). """ def __init__( self, num_classes, loss='softmax', groups=32, channels=[64, 256, 512, 1024, 2048], embed_dim=1024, **kwargs ): super(MLFN, self).__init__() self.loss = loss self.groups = groups # first convolutional layer self.conv1 = nn.Conv2d(3, channels[0], 7, stride=2, padding=3) self.bn1 = nn.BatchNorm2d(channels[0]) self.maxpool = nn.MaxPool2d(3, stride=2, padding=1) # main body self.feature = nn.ModuleList( [ # layer 1-3 MLFNBlock(channels[0], channels[1], 1, [128, 64], self.groups), MLFNBlock(channels[1], channels[1], 1, [128, 64], self.groups), MLFNBlock(channels[1], channels[1], 1, [128, 64], self.groups), # layer 4-7 MLFNBlock( channels[1], channels[2], 2, [256, 128], self.groups ), MLFNBlock( channels[2], channels[2], 1, [256, 128], self.groups ), MLFNBlock( channels[2], channels[2], 1, [256, 128], self.groups ), MLFNBlock( channels[2], channels[2], 1, [256, 128], self.groups ), # layer 8-13 MLFNBlock( channels[2], channels[3], 2, [512, 128], self.groups ), MLFNBlock( channels[3], channels[3], 1, [512, 128], self.groups ), MLFNBlock( channels[3], channels[3], 1, [512, 128], self.groups ), MLFNBlock( channels[3], channels[3], 1, [512, 128], self.groups ), MLFNBlock( channels[3], channels[3], 1, [512, 128], self.groups ), MLFNBlock( channels[3], channels[3], 1, [512, 128], self.groups ), # layer 14-16 MLFNBlock( channels[3], channels[4], 2, [512, 128], self.groups ), MLFNBlock( channels[4], channels[4], 1, [512, 128], self.groups ), MLFNBlock( channels[4], channels[4], 1, [512, 128], self.groups ), ] ) self.global_avgpool = nn.AdaptiveAvgPool2d(1) # projection functions self.fc_x = nn.Sequential( nn.Conv2d(channels[4], embed_dim, 1, bias=False), nn.BatchNorm2d(embed_dim), nn.ReLU(inplace=True), ) self.fc_s = nn.Sequential( nn.Conv2d(self.groups * 16, embed_dim, 1, bias=False), nn.BatchNorm2d(embed_dim), nn.ReLU(inplace=True), ) self.classifier = nn.Linear(embed_dim, num_classes) self.init_params() def init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu' ) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = F.relu(x, inplace=True) x = self.maxpool(x) s_hat = [] for block in self.feature: x, s = block(x) s_hat.append(s) s_hat = torch.cat(s_hat, 1) x = self.global_avgpool(x) x = self.fc_x(x) s_hat = self.fc_s(s_hat) v = (x+s_hat) * 0.5 v = v.view(v.size(0), -1) if not self.training: return v y = self.classifier(v) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, v else: raise KeyError('Unsupported loss: {}'.format(self.loss)) def 
init_pretrained_weights(model, model_url): """Initializes model with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. """ pretrain_dict = model_zoo.load_url(model_url) model_dict = model.state_dict() pretrain_dict = { k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size() } model_dict.update(pretrain_dict) model.load_state_dict(model_dict) def mlfn(num_classes, loss='softmax', pretrained=True, **kwargs): model = MLFN(num_classes, loss, **kwargs) if pretrained: # init_pretrained_weights(model, model_urls['imagenet']) import warnings warnings.warn( 'The imagenet pretrained weights need to be manually downloaded from {}' .format(model_urls['imagenet']) ) return model ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/mobilenetv2.py ================================================ from __future__ import division, absolute_import import torch.utils.model_zoo as model_zoo from torch import nn from torch.nn import functional as F __all__ = ['mobilenetv2_x1_0', 'mobilenetv2_x1_4'] model_urls = { # 1.0: top-1 71.3 'mobilenetv2_x1_0': 'https://mega.nz/#!NKp2wAIA!1NH1pbNzY_M2hVk_hdsxNM1NUOWvvGPHhaNr-fASF6c', # 1.4: top-1 73.9 'mobilenetv2_x1_4': 'https://mega.nz/#!RGhgEIwS!xN2s2ZdyqI6vQ3EwgmRXLEW3khr9tpXg96G9SUJugGk', } class ConvBlock(nn.Module): """Basic convolutional block. convolution (bias discarded) + batch normalization + relu6. Args: in_c (int): number of input channels. out_c (int): number of output channels. k (int or tuple): kernel size. s (int or tuple): stride. p (int or tuple): padding. g (int): number of blocked connections from input channels to output channels (default: 1). """ def __init__(self, in_c, out_c, k, s=1, p=0, g=1): super(ConvBlock, self).__init__() self.conv = nn.Conv2d( in_c, out_c, k, stride=s, padding=p, bias=False, groups=g ) self.bn = nn.BatchNorm2d(out_c) def forward(self, x): return F.relu6(self.bn(self.conv(x))) class Bottleneck(nn.Module): def __init__(self, in_channels, out_channels, expansion_factor, stride=1): super(Bottleneck, self).__init__() mid_channels = in_channels * expansion_factor self.use_residual = stride == 1 and in_channels == out_channels self.conv1 = ConvBlock(in_channels, mid_channels, 1) self.dwconv2 = ConvBlock( mid_channels, mid_channels, 3, stride, 1, g=mid_channels ) self.conv3 = nn.Sequential( nn.Conv2d(mid_channels, out_channels, 1, bias=False), nn.BatchNorm2d(out_channels), ) def forward(self, x): m = self.conv1(x) m = self.dwconv2(m) m = self.conv3(m) if self.use_residual: return x + m else: return m class MobileNetV2(nn.Module): """MobileNetV2. Reference: Sandler et al. MobileNetV2: Inverted Residuals and Linear Bottlenecks. CVPR 2018. Public keys: - ``mobilenetv2_x1_0``: MobileNetV2 x1.0. - ``mobilenetv2_x1_4``: MobileNetV2 x1.4. 
""" def __init__( self, num_classes, width_mult=1, loss='softmax', fc_dims=None, dropout_p=None, **kwargs ): super(MobileNetV2, self).__init__() self.loss = loss self.in_channels = int(32 * width_mult) self.feature_dim = int(1280 * width_mult) if width_mult > 1 else 1280 # construct layers self.conv1 = ConvBlock(3, self.in_channels, 3, s=2, p=1) self.conv2 = self._make_layer( Bottleneck, 1, int(16 * width_mult), 1, 1 ) self.conv3 = self._make_layer( Bottleneck, 6, int(24 * width_mult), 2, 2 ) self.conv4 = self._make_layer( Bottleneck, 6, int(32 * width_mult), 3, 2 ) self.conv5 = self._make_layer( Bottleneck, 6, int(64 * width_mult), 4, 2 ) self.conv6 = self._make_layer( Bottleneck, 6, int(96 * width_mult), 3, 1 ) self.conv7 = self._make_layer( Bottleneck, 6, int(160 * width_mult), 3, 2 ) self.conv8 = self._make_layer( Bottleneck, 6, int(320 * width_mult), 1, 1 ) self.conv9 = ConvBlock(self.in_channels, self.feature_dim, 1) self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.fc = self._construct_fc_layer( fc_dims, self.feature_dim, dropout_p ) self.classifier = nn.Linear(self.feature_dim, num_classes) self._init_params() def _make_layer(self, block, t, c, n, s): # t: expansion factor # c: output channels # n: number of blocks # s: stride for first layer layers = [] layers.append(block(self.in_channels, c, t, s)) self.in_channels = c for i in range(1, n): layers.append(block(self.in_channels, c, t)) return nn.Sequential(*layers) def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): """Constructs fully connected layer. Args: fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed input_dim (int): input dimension dropout_p (float): dropout probability, if None, dropout is unused """ if fc_dims is None: self.feature_dim = input_dim return None assert isinstance( fc_dims, (list, tuple) ), 'fc_dims must be either list or tuple, but got {}'.format( type(fc_dims) ) layers = [] for dim in fc_dims: layers.append(nn.Linear(input_dim, dim)) layers.append(nn.BatchNorm1d(dim)) layers.append(nn.ReLU(inplace=True)) if dropout_p is not None: layers.append(nn.Dropout(p=dropout_p)) input_dim = dim self.feature_dim = fc_dims[-1] return nn.Sequential(*layers) def _init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu' ) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) def featuremaps(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.conv4(x) x = self.conv5(x) x = self.conv6(x) x = self.conv7(x) x = self.conv8(x) x = self.conv9(x) return x def forward(self, x): f = self.featuremaps(x) v = self.global_avgpool(f) v = v.view(v.size(0), -1) if self.fc is not None: v = self.fc(v) if not self.training: return v y = self.classifier(v) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, v else: raise KeyError("Unsupported loss: {}".format(self.loss)) def init_pretrained_weights(model, model_url): """Initializes model with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. 
""" pretrain_dict = model_zoo.load_url(model_url) model_dict = model.state_dict() pretrain_dict = { k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size() } model_dict.update(pretrain_dict) model.load_state_dict(model_dict) def mobilenetv2_x1_0(num_classes, loss, pretrained=True, **kwargs): model = MobileNetV2( num_classes, loss=loss, width_mult=1, fc_dims=None, dropout_p=None, **kwargs ) if pretrained: # init_pretrained_weights(model, model_urls['mobilenetv2_x1_0']) import warnings warnings.warn( 'The imagenet pretrained weights need to be manually downloaded from {}' .format(model_urls['mobilenetv2_x1_0']) ) return model def mobilenetv2_x1_4(num_classes, loss, pretrained=True, **kwargs): model = MobileNetV2( num_classes, loss=loss, width_mult=1.4, fc_dims=None, dropout_p=None, **kwargs ) if pretrained: # init_pretrained_weights(model, model_urls['mobilenetv2_x1_4']) import warnings warnings.warn( 'The imagenet pretrained weights need to be manually downloaded from {}' .format(model_urls['mobilenetv2_x1_4']) ) return model ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/mudeep.py ================================================ from __future__ import division, absolute_import import torch from torch import nn from torch.nn import functional as F __all__ = ['MuDeep'] class ConvBlock(nn.Module): """Basic convolutional block. convolution + batch normalization + relu. Args: in_c (int): number of input channels. out_c (int): number of output channels. k (int or tuple): kernel size. s (int or tuple): stride. p (int or tuple): padding. """ def __init__(self, in_c, out_c, k, s, p): super(ConvBlock, self).__init__() self.conv = nn.Conv2d(in_c, out_c, k, stride=s, padding=p) self.bn = nn.BatchNorm2d(out_c) def forward(self, x): return F.relu(self.bn(self.conv(x))) class ConvLayers(nn.Module): """Preprocessing layers.""" def __init__(self): super(ConvLayers, self).__init__() self.conv1 = ConvBlock(3, 48, k=3, s=1, p=1) self.conv2 = ConvBlock(48, 96, k=3, s=1, p=1) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.maxpool(x) return x class MultiScaleA(nn.Module): """Multi-scale stream layer A (Sec.3.1)""" def __init__(self): super(MultiScaleA, self).__init__() self.stream1 = nn.Sequential( ConvBlock(96, 96, k=1, s=1, p=0), ConvBlock(96, 24, k=3, s=1, p=1), ) self.stream2 = nn.Sequential( nn.AvgPool2d(kernel_size=3, stride=1, padding=1), ConvBlock(96, 24, k=1, s=1, p=0), ) self.stream3 = ConvBlock(96, 24, k=1, s=1, p=0) self.stream4 = nn.Sequential( ConvBlock(96, 16, k=1, s=1, p=0), ConvBlock(16, 24, k=3, s=1, p=1), ConvBlock(24, 24, k=3, s=1, p=1), ) def forward(self, x): s1 = self.stream1(x) s2 = self.stream2(x) s3 = self.stream3(x) s4 = self.stream4(x) y = torch.cat([s1, s2, s3, s4], dim=1) return y class Reduction(nn.Module): """Reduction layer (Sec.3.1)""" def __init__(self): super(Reduction, self).__init__() self.stream1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.stream2 = ConvBlock(96, 96, k=3, s=2, p=1) self.stream3 = nn.Sequential( ConvBlock(96, 48, k=1, s=1, p=0), ConvBlock(48, 56, k=3, s=1, p=1), ConvBlock(56, 64, k=3, s=2, p=1), ) def forward(self, x): s1 = self.stream1(x) s2 = self.stream2(x) s3 = self.stream3(x) y = torch.cat([s1, s2, s3], dim=1) return y class MultiScaleB(nn.Module): """Multi-scale stream layer B (Sec.3.1)""" def __init__(self): super(MultiScaleB, self).__init__() self.stream1 = 
nn.Sequential( nn.AvgPool2d(kernel_size=3, stride=1, padding=1), ConvBlock(256, 256, k=1, s=1, p=0), ) self.stream2 = nn.Sequential( ConvBlock(256, 64, k=1, s=1, p=0), ConvBlock(64, 128, k=(1, 3), s=1, p=(0, 1)), ConvBlock(128, 256, k=(3, 1), s=1, p=(1, 0)), ) self.stream3 = ConvBlock(256, 256, k=1, s=1, p=0) self.stream4 = nn.Sequential( ConvBlock(256, 64, k=1, s=1, p=0), ConvBlock(64, 64, k=(1, 3), s=1, p=(0, 1)), ConvBlock(64, 128, k=(3, 1), s=1, p=(1, 0)), ConvBlock(128, 128, k=(1, 3), s=1, p=(0, 1)), ConvBlock(128, 256, k=(3, 1), s=1, p=(1, 0)), ) def forward(self, x): s1 = self.stream1(x) s2 = self.stream2(x) s3 = self.stream3(x) s4 = self.stream4(x) return s1, s2, s3, s4 class Fusion(nn.Module): """Saliency-based learning fusion layer (Sec.3.2)""" def __init__(self): super(Fusion, self).__init__() self.a1 = nn.Parameter(torch.rand(1, 256, 1, 1)) self.a2 = nn.Parameter(torch.rand(1, 256, 1, 1)) self.a3 = nn.Parameter(torch.rand(1, 256, 1, 1)) self.a4 = nn.Parameter(torch.rand(1, 256, 1, 1)) # We add an average pooling layer to reduce the spatial dimension # of feature maps, which differs from the original paper. self.avgpool = nn.AvgPool2d(kernel_size=4, stride=4, padding=0) def forward(self, x1, x2, x3, x4): s1 = self.a1.expand_as(x1) * x1 s2 = self.a2.expand_as(x2) * x2 s3 = self.a3.expand_as(x3) * x3 s4 = self.a4.expand_as(x4) * x4 y = self.avgpool(s1 + s2 + s3 + s4) return y class MuDeep(nn.Module): """Multiscale deep neural network. Reference: Qian et al. Multi-scale Deep Learning Architectures for Person Re-identification. ICCV 2017. Public keys: - ``mudeep``: Multiscale deep neural network. """ def __init__(self, num_classes, loss='softmax', **kwargs): super(MuDeep, self).__init__() self.loss = loss self.block1 = ConvLayers() self.block2 = MultiScaleA() self.block3 = Reduction() self.block4 = MultiScaleB() self.block5 = Fusion() # Due to this fully connected layer, input image has to be fixed # in shape, i.e. (3, 256, 128), such that the last convolutional feature # maps are of shape (256, 16, 8). If input shape is changed, # the input dimension of this layer has to be changed accordingly. self.fc = nn.Sequential( nn.Linear(256 * 16 * 8, 4096), nn.BatchNorm1d(4096), nn.ReLU(), ) self.classifier = nn.Linear(4096, num_classes) self.feat_dim = 4096 def featuremaps(self, x): x = self.block1(x) x = self.block2(x) x = self.block3(x) x = self.block4(x) x = self.block5(*x) return x def forward(self, x): x = self.featuremaps(x) x = x.view(x.size(0), -1) x = self.fc(x) y = self.classifier(x) if not self.training: return x if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, x else: raise KeyError('Unsupported loss: {}'.format(self.loss)) ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/nasnet.py ================================================ from __future__ import division, absolute_import import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.model_zoo as model_zoo __all__ = ['nasnetamobile'] """ NASNet Mobile Thanks to Anastasiia (https://github.com/DagnyT) for the great help, support and motivation! 
------------------------------------------------------------------------------------
   Architecture        | Top-1 Acc | Top-5 Acc | Multiply-Adds | Params (M)
------------------------------------------------------------------------------------
|  NASNet-A (4 @ 1056) |  74.08%   |  91.74%   |     564 M     |    5.3     |
------------------------------------------------------------------------------------
# References:
 - [Learning Transferable Architectures for Scalable Image Recognition]
   (https://arxiv.org/abs/1707.07012)
"""
"""
Code imported from https://github.com/Cadene/pretrained-models.pytorch
"""

pretrained_settings = {
    'nasnetamobile': {
        'imagenet': {
            # 'url': 'https://github.com/veronikayurchuk/pretrained-models.pytorch/releases/download/v1.0/nasnetmobile-7e03cead.pth.tar',
            'url':
            'http://data.lip6.fr/cadene/pretrainedmodels/nasnetamobile-7e03cead.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],  # resize 256
            'input_range': [0, 1],
            'mean': [0.5, 0.5, 0.5],
            'std': [0.5, 0.5, 0.5],
            'num_classes': 1000
        },
        # 'imagenet+background': {
        #     # 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/nasnetalarge-a1897284.pth',
        #     'input_space': 'RGB',
        #     'input_size': [3, 224, 224],  # resize 256
        #     'input_range': [0, 1],
        #     'mean': [0.5, 0.5, 0.5],
        #     'std': [0.5, 0.5, 0.5],
        #     'num_classes': 1001
        # }
    }
}


class MaxPoolPad(nn.Module):

    def __init__(self):
        super(MaxPoolPad, self).__init__()
        self.pad = nn.ZeroPad2d((1, 0, 1, 0))
        self.pool = nn.MaxPool2d(3, stride=2, padding=1)

    def forward(self, x):
        x = self.pad(x)
        x = self.pool(x)
        x = x[:, :, 1:, 1:].contiguous()
        return x


class AvgPoolPad(nn.Module):

    def __init__(self, stride=2, padding=1):
        super(AvgPoolPad, self).__init__()
        self.pad = nn.ZeroPad2d((1, 0, 1, 0))
        self.pool = nn.AvgPool2d(
            3, stride=stride, padding=padding, count_include_pad=False
        )

    def forward(self, x):
        x = self.pad(x)
        x = self.pool(x)
        x = x[:, :, 1:, 1:].contiguous()
        return x


class SeparableConv2d(nn.Module):

    def __init__(
        self, in_channels, out_channels, dw_kernel, dw_stride, dw_padding,
        bias=False
    ):
        super(SeparableConv2d, self).__init__()
        self.depthwise_conv2d = nn.Conv2d(
            in_channels,
            in_channels,
            dw_kernel,
            stride=dw_stride,
            padding=dw_padding,
            bias=bias,
            groups=in_channels
        )
        self.pointwise_conv2d = nn.Conv2d(
            in_channels, out_channels, 1, stride=1, bias=bias
        )

    def forward(self, x):
        x = self.depthwise_conv2d(x)
        x = self.pointwise_conv2d(x)
        return x


class BranchSeparables(nn.Module):

    def __init__(
        self, in_channels, out_channels, kernel_size, stride, padding,
        name=None, bias=False
    ):
        super(BranchSeparables, self).__init__()
        self.relu = nn.ReLU()
        self.separable_1 = SeparableConv2d(
            in_channels, in_channels, kernel_size, stride, padding, bias=bias
        )
        self.bn_sep_1 = nn.BatchNorm2d(
            in_channels, eps=0.001, momentum=0.1, affine=True
        )
        self.relu1 = nn.ReLU()
        self.separable_2 = SeparableConv2d(
            in_channels, out_channels, kernel_size, 1, padding, bias=bias
        )
        self.bn_sep_2 = nn.BatchNorm2d(
            out_channels, eps=0.001, momentum=0.1, affine=True
        )
        self.name = name

    def forward(self, x):
        x = self.relu(x)
        if self.name == 'specific':
            x = nn.ZeroPad2d((1, 0, 1, 0))(x)
        x = self.separable_1(x)
        if self.name == 'specific':
            x = x[:, :, 1:, 1:].contiguous()
        x = self.bn_sep_1(x)
        x = self.relu1(x)
        x = self.separable_2(x)
        x = self.bn_sep_2(x)
        return x


class BranchSeparablesStem(nn.Module):

    def __init__(
        self, in_channels, out_channels, kernel_size, stride, padding,
        bias=False
    ):
        super(BranchSeparablesStem, self).__init__()
        self.relu = nn.ReLU()
        self.separable_1 = SeparableConv2d(
            in_channels, out_channels, kernel_size, stride, padding,
bias=bias ) self.bn_sep_1 = nn.BatchNorm2d( out_channels, eps=0.001, momentum=0.1, affine=True ) self.relu1 = nn.ReLU() self.separable_2 = SeparableConv2d( out_channels, out_channels, kernel_size, 1, padding, bias=bias ) self.bn_sep_2 = nn.BatchNorm2d( out_channels, eps=0.001, momentum=0.1, affine=True ) def forward(self, x): x = self.relu(x) x = self.separable_1(x) x = self.bn_sep_1(x) x = self.relu1(x) x = self.separable_2(x) x = self.bn_sep_2(x) return x class BranchSeparablesReduction(BranchSeparables): def __init__( self, in_channels, out_channels, kernel_size, stride, padding, z_padding=1, bias=False ): BranchSeparables.__init__( self, in_channels, out_channels, kernel_size, stride, padding, bias ) self.padding = nn.ZeroPad2d((z_padding, 0, z_padding, 0)) def forward(self, x): x = self.relu(x) x = self.padding(x) x = self.separable_1(x) x = x[:, :, 1:, 1:].contiguous() x = self.bn_sep_1(x) x = self.relu1(x) x = self.separable_2(x) x = self.bn_sep_2(x) return x class CellStem0(nn.Module): def __init__(self, stem_filters, num_filters=42): super(CellStem0, self).__init__() self.num_filters = num_filters self.stem_filters = stem_filters self.conv_1x1 = nn.Sequential() self.conv_1x1.add_module('relu', nn.ReLU()) self.conv_1x1.add_module( 'conv', nn.Conv2d( self.stem_filters, self.num_filters, 1, stride=1, bias=False ) ) self.conv_1x1.add_module( 'bn', nn.BatchNorm2d( self.num_filters, eps=0.001, momentum=0.1, affine=True ) ) self.comb_iter_0_left = BranchSeparables( self.num_filters, self.num_filters, 5, 2, 2 ) self.comb_iter_0_right = BranchSeparablesStem( self.stem_filters, self.num_filters, 7, 2, 3, bias=False ) self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1) self.comb_iter_1_right = BranchSeparablesStem( self.stem_filters, self.num_filters, 7, 2, 3, bias=False ) self.comb_iter_2_left = nn.AvgPool2d( 3, stride=2, padding=1, count_include_pad=False ) self.comb_iter_2_right = BranchSeparablesStem( self.stem_filters, self.num_filters, 5, 2, 2, bias=False ) self.comb_iter_3_right = nn.AvgPool2d( 3, stride=1, padding=1, count_include_pad=False ) self.comb_iter_4_left = BranchSeparables( self.num_filters, self.num_filters, 3, 1, 1, bias=False ) self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1) def forward(self, x): x1 = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x1) x_comb_iter_0_right = self.comb_iter_0_right(x) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x1) x_comb_iter_1_right = self.comb_iter_1_right(x) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x1) x_comb_iter_2_right = self.comb_iter_2_right(x) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x1) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat( [x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1 ) return x_out class CellStem1(nn.Module): def __init__(self, stem_filters, num_filters): super(CellStem1, self).__init__() self.num_filters = num_filters self.stem_filters = stem_filters self.conv_1x1 = nn.Sequential() self.conv_1x1.add_module('relu', nn.ReLU()) self.conv_1x1.add_module( 'conv', nn.Conv2d( 2 * self.num_filters, self.num_filters, 1, stride=1, bias=False ) ) self.conv_1x1.add_module( 'bn', nn.BatchNorm2d( 
self.num_filters, eps=0.001, momentum=0.1, affine=True ) ) self.relu = nn.ReLU() self.path_1 = nn.Sequential() self.path_1.add_module( 'avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False) ) self.path_1.add_module( 'conv', nn.Conv2d( self.stem_filters, self.num_filters // 2, 1, stride=1, bias=False ) ) self.path_2 = nn.ModuleList() self.path_2.add_module('pad', nn.ZeroPad2d((0, 1, 0, 1))) self.path_2.add_module( 'avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False) ) self.path_2.add_module( 'conv', nn.Conv2d( self.stem_filters, self.num_filters // 2, 1, stride=1, bias=False ) ) self.final_path_bn = nn.BatchNorm2d( self.num_filters, eps=0.001, momentum=0.1, affine=True ) self.comb_iter_0_left = BranchSeparables( self.num_filters, self.num_filters, 5, 2, 2, name='specific', bias=False ) self.comb_iter_0_right = BranchSeparables( self.num_filters, self.num_filters, 7, 2, 3, name='specific', bias=False ) # self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1) self.comb_iter_1_left = MaxPoolPad() self.comb_iter_1_right = BranchSeparables( self.num_filters, self.num_filters, 7, 2, 3, name='specific', bias=False ) # self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False) self.comb_iter_2_left = AvgPoolPad() self.comb_iter_2_right = BranchSeparables( self.num_filters, self.num_filters, 5, 2, 2, name='specific', bias=False ) self.comb_iter_3_right = nn.AvgPool2d( 3, stride=1, padding=1, count_include_pad=False ) self.comb_iter_4_left = BranchSeparables( self.num_filters, self.num_filters, 3, 1, 1, name='specific', bias=False ) # self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1) self.comb_iter_4_right = MaxPoolPad() def forward(self, x_conv0, x_stem_0): x_left = self.conv_1x1(x_stem_0) x_relu = self.relu(x_conv0) # path 1 x_path1 = self.path_1(x_relu) # path 2 x_path2 = self.path_2.pad(x_relu) x_path2 = x_path2[:, :, 1:, 1:] x_path2 = self.path_2.avgpool(x_path2) x_path2 = self.path_2.conv(x_path2) # final path x_right = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) x_comb_iter_0_left = self.comb_iter_0_left(x_left) x_comb_iter_0_right = self.comb_iter_0_right(x_right) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_left) x_comb_iter_1_right = self.comb_iter_1_right(x_right) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_left) x_comb_iter_2_right = self.comb_iter_2_right(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x_left) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat( [x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1 ) return x_out class FirstCell(nn.Module): def __init__( self, in_channels_left, out_channels_left, in_channels_right, out_channels_right ): super(FirstCell, self).__init__() self.conv_1x1 = nn.Sequential() self.conv_1x1.add_module('relu', nn.ReLU()) self.conv_1x1.add_module( 'conv', nn.Conv2d( in_channels_right, out_channels_right, 1, stride=1, bias=False ) ) self.conv_1x1.add_module( 'bn', nn.BatchNorm2d( out_channels_right, eps=0.001, momentum=0.1, affine=True ) ) self.relu = nn.ReLU() self.path_1 = nn.Sequential() self.path_1.add_module( 'avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False) ) self.path_1.add_module( 
'conv', nn.Conv2d( in_channels_left, out_channels_left, 1, stride=1, bias=False ) ) self.path_2 = nn.ModuleList() self.path_2.add_module('pad', nn.ZeroPad2d((0, 1, 0, 1))) self.path_2.add_module( 'avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False) ) self.path_2.add_module( 'conv', nn.Conv2d( in_channels_left, out_channels_left, 1, stride=1, bias=False ) ) self.final_path_bn = nn.BatchNorm2d( out_channels_left * 2, eps=0.001, momentum=0.1, affine=True ) self.comb_iter_0_left = BranchSeparables( out_channels_right, out_channels_right, 5, 1, 2, bias=False ) self.comb_iter_0_right = BranchSeparables( out_channels_right, out_channels_right, 3, 1, 1, bias=False ) self.comb_iter_1_left = BranchSeparables( out_channels_right, out_channels_right, 5, 1, 2, bias=False ) self.comb_iter_1_right = BranchSeparables( out_channels_right, out_channels_right, 3, 1, 1, bias=False ) self.comb_iter_2_left = nn.AvgPool2d( 3, stride=1, padding=1, count_include_pad=False ) self.comb_iter_3_left = nn.AvgPool2d( 3, stride=1, padding=1, count_include_pad=False ) self.comb_iter_3_right = nn.AvgPool2d( 3, stride=1, padding=1, count_include_pad=False ) self.comb_iter_4_left = BranchSeparables( out_channels_right, out_channels_right, 3, 1, 1, bias=False ) def forward(self, x, x_prev): x_relu = self.relu(x_prev) # path 1 x_path1 = self.path_1(x_relu) # path 2 x_path2 = self.path_2.pad(x_relu) x_path2 = x_path2[:, :, 1:, 1:] x_path2 = self.path_2.avgpool(x_path2) x_path2 = self.path_2.conv(x_path2) # final path x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) x_right = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_left) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_left x_comb_iter_3_left = self.comb_iter_3_left(x_left) x_comb_iter_3_right = self.comb_iter_3_right(x_left) x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right x_comb_iter_4_left = self.comb_iter_4_left(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_right x_out = torch.cat( [ x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4 ], 1 ) return x_out class NormalCell(nn.Module): def __init__( self, in_channels_left, out_channels_left, in_channels_right, out_channels_right ): super(NormalCell, self).__init__() self.conv_prev_1x1 = nn.Sequential() self.conv_prev_1x1.add_module('relu', nn.ReLU()) self.conv_prev_1x1.add_module( 'conv', nn.Conv2d( in_channels_left, out_channels_left, 1, stride=1, bias=False ) ) self.conv_prev_1x1.add_module( 'bn', nn.BatchNorm2d( out_channels_left, eps=0.001, momentum=0.1, affine=True ) ) self.conv_1x1 = nn.Sequential() self.conv_1x1.add_module('relu', nn.ReLU()) self.conv_1x1.add_module( 'conv', nn.Conv2d( in_channels_right, out_channels_right, 1, stride=1, bias=False ) ) self.conv_1x1.add_module( 'bn', nn.BatchNorm2d( out_channels_right, eps=0.001, momentum=0.1, affine=True ) ) self.comb_iter_0_left = BranchSeparables( out_channels_right, out_channels_right, 5, 1, 2, bias=False ) self.comb_iter_0_right = BranchSeparables( out_channels_left, out_channels_left, 3, 1, 1, bias=False ) self.comb_iter_1_left = BranchSeparables( out_channels_left, out_channels_left, 5, 1, 2, bias=False ) self.comb_iter_1_right = BranchSeparables( out_channels_left, 
out_channels_left, 3, 1, 1, bias=False ) self.comb_iter_2_left = nn.AvgPool2d( 3, stride=1, padding=1, count_include_pad=False ) self.comb_iter_3_left = nn.AvgPool2d( 3, stride=1, padding=1, count_include_pad=False ) self.comb_iter_3_right = nn.AvgPool2d( 3, stride=1, padding=1, count_include_pad=False ) self.comb_iter_4_left = BranchSeparables( out_channels_right, out_channels_right, 3, 1, 1, bias=False ) def forward(self, x, x_prev): x_left = self.conv_prev_1x1(x_prev) x_right = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_left) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_left x_comb_iter_3_left = self.comb_iter_3_left(x_left) x_comb_iter_3_right = self.comb_iter_3_right(x_left) x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right x_comb_iter_4_left = self.comb_iter_4_left(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_right x_out = torch.cat( [ x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4 ], 1 ) return x_out class ReductionCell0(nn.Module): def __init__( self, in_channels_left, out_channels_left, in_channels_right, out_channels_right ): super(ReductionCell0, self).__init__() self.conv_prev_1x1 = nn.Sequential() self.conv_prev_1x1.add_module('relu', nn.ReLU()) self.conv_prev_1x1.add_module( 'conv', nn.Conv2d( in_channels_left, out_channels_left, 1, stride=1, bias=False ) ) self.conv_prev_1x1.add_module( 'bn', nn.BatchNorm2d( out_channels_left, eps=0.001, momentum=0.1, affine=True ) ) self.conv_1x1 = nn.Sequential() self.conv_1x1.add_module('relu', nn.ReLU()) self.conv_1x1.add_module( 'conv', nn.Conv2d( in_channels_right, out_channels_right, 1, stride=1, bias=False ) ) self.conv_1x1.add_module( 'bn', nn.BatchNorm2d( out_channels_right, eps=0.001, momentum=0.1, affine=True ) ) self.comb_iter_0_left = BranchSeparablesReduction( out_channels_right, out_channels_right, 5, 2, 2, bias=False ) self.comb_iter_0_right = BranchSeparablesReduction( out_channels_right, out_channels_right, 7, 2, 3, bias=False ) self.comb_iter_1_left = MaxPoolPad() self.comb_iter_1_right = BranchSeparablesReduction( out_channels_right, out_channels_right, 7, 2, 3, bias=False ) self.comb_iter_2_left = AvgPoolPad() self.comb_iter_2_right = BranchSeparablesReduction( out_channels_right, out_channels_right, 5, 2, 2, bias=False ) self.comb_iter_3_right = nn.AvgPool2d( 3, stride=1, padding=1, count_include_pad=False ) self.comb_iter_4_left = BranchSeparablesReduction( out_channels_right, out_channels_right, 3, 1, 1, bias=False ) self.comb_iter_4_right = MaxPoolPad() def forward(self, x, x_prev): x_left = self.conv_prev_1x1(x_prev) x_right = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_right) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2_right = self.comb_iter_2_right(x_left) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 
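        # The five comb_iter_* branches in each cell follow one recipe: sum two
        # parallel paths, then concatenate a subset of the sums channel-wise.
        # Stem/reduction cells keep four of the five sums (4x filters out),
        # while FirstCell/NormalCell also prepend x_left (6x filters out) --
        # hence the 6x/8x/12x/16x/24x channel counts annotated in
        # NASNetAMobile.__init__ below. A tiny self-contained illustration
        # (dummy tensors; f = 44 matches the default penultimate_filters // 24):
        #
        #     import torch
        #     f = 44
        #     sums = [torch.randn(2, f, 14, 14) for _ in range(4)]
        #     assert torch.cat(sums, 1).shape == (2, 4 * f, 14, 14)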
x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat( [x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1 ) return x_out class ReductionCell1(nn.Module): def __init__( self, in_channels_left, out_channels_left, in_channels_right, out_channels_right ): super(ReductionCell1, self).__init__() self.conv_prev_1x1 = nn.Sequential() self.conv_prev_1x1.add_module('relu', nn.ReLU()) self.conv_prev_1x1.add_module( 'conv', nn.Conv2d( in_channels_left, out_channels_left, 1, stride=1, bias=False ) ) self.conv_prev_1x1.add_module( 'bn', nn.BatchNorm2d( out_channels_left, eps=0.001, momentum=0.1, affine=True ) ) self.conv_1x1 = nn.Sequential() self.conv_1x1.add_module('relu', nn.ReLU()) self.conv_1x1.add_module( 'conv', nn.Conv2d( in_channels_right, out_channels_right, 1, stride=1, bias=False ) ) self.conv_1x1.add_module( 'bn', nn.BatchNorm2d( out_channels_right, eps=0.001, momentum=0.1, affine=True ) ) self.comb_iter_0_left = BranchSeparables( out_channels_right, out_channels_right, 5, 2, 2, name='specific', bias=False ) self.comb_iter_0_right = BranchSeparables( out_channels_right, out_channels_right, 7, 2, 3, name='specific', bias=False ) # self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1) self.comb_iter_1_left = MaxPoolPad() self.comb_iter_1_right = BranchSeparables( out_channels_right, out_channels_right, 7, 2, 3, name='specific', bias=False ) # self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False) self.comb_iter_2_left = AvgPoolPad() self.comb_iter_2_right = BranchSeparables( out_channels_right, out_channels_right, 5, 2, 2, name='specific', bias=False ) self.comb_iter_3_right = nn.AvgPool2d( 3, stride=1, padding=1, count_include_pad=False ) self.comb_iter_4_left = BranchSeparables( out_channels_right, out_channels_right, 3, 1, 1, name='specific', bias=False ) # self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1) self.comb_iter_4_right = MaxPoolPad() def forward(self, x, x_prev): x_left = self.conv_prev_1x1(x_prev) x_right = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_right) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2_right = self.comb_iter_2_right(x_left) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat( [x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1 ) return x_out class NASNetAMobile(nn.Module): """Neural Architecture Search (NAS). Reference: Zoph et al. Learning Transferable Architectures for Scalable Image Recognition. CVPR 2018. Public keys: - ``nasnetamobile``: NASNet-A Mobile. 
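
As a hypothetical smoke test for this class (224x224 matches the pretrained input_size given above; the 1000-class head is an arbitrary choice):

    import torch

    net = NASNetAMobile(num_classes=1000, loss='softmax')
    net.eval()
    with torch.no_grad():
        v = net(torch.randn(2, 3, 224, 224))   # eval mode -> (2, 1056) features
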
""" def __init__( self, num_classes, loss, stem_filters=32, penultimate_filters=1056, filters_multiplier=2, **kwargs ): super(NASNetAMobile, self).__init__() self.stem_filters = stem_filters self.penultimate_filters = penultimate_filters self.filters_multiplier = filters_multiplier self.loss = loss filters = self.penultimate_filters // 24 # 24 is default value for the architecture self.conv0 = nn.Sequential() self.conv0.add_module( 'conv', nn.Conv2d( in_channels=3, out_channels=self.stem_filters, kernel_size=3, padding=0, stride=2, bias=False ) ) self.conv0.add_module( 'bn', nn.BatchNorm2d( self.stem_filters, eps=0.001, momentum=0.1, affine=True ) ) self.cell_stem_0 = CellStem0( self.stem_filters, num_filters=filters // (filters_multiplier**2) ) self.cell_stem_1 = CellStem1( self.stem_filters, num_filters=filters // filters_multiplier ) self.cell_0 = FirstCell( in_channels_left=filters, out_channels_left=filters // 2, # 1, 0.5 in_channels_right=2 * filters, out_channels_right=filters ) # 2, 1 self.cell_1 = NormalCell( in_channels_left=2 * filters, out_channels_left=filters, # 2, 1 in_channels_right=6 * filters, out_channels_right=filters ) # 6, 1 self.cell_2 = NormalCell( in_channels_left=6 * filters, out_channels_left=filters, # 6, 1 in_channels_right=6 * filters, out_channels_right=filters ) # 6, 1 self.cell_3 = NormalCell( in_channels_left=6 * filters, out_channels_left=filters, # 6, 1 in_channels_right=6 * filters, out_channels_right=filters ) # 6, 1 self.reduction_cell_0 = ReductionCell0( in_channels_left=6 * filters, out_channels_left=2 * filters, # 6, 2 in_channels_right=6 * filters, out_channels_right=2 * filters ) # 6, 2 self.cell_6 = FirstCell( in_channels_left=6 * filters, out_channels_left=filters, # 6, 1 in_channels_right=8 * filters, out_channels_right=2 * filters ) # 8, 2 self.cell_7 = NormalCell( in_channels_left=8 * filters, out_channels_left=2 * filters, # 8, 2 in_channels_right=12 * filters, out_channels_right=2 * filters ) # 12, 2 self.cell_8 = NormalCell( in_channels_left=12 * filters, out_channels_left=2 * filters, # 12, 2 in_channels_right=12 * filters, out_channels_right=2 * filters ) # 12, 2 self.cell_9 = NormalCell( in_channels_left=12 * filters, out_channels_left=2 * filters, # 12, 2 in_channels_right=12 * filters, out_channels_right=2 * filters ) # 12, 2 self.reduction_cell_1 = ReductionCell1( in_channels_left=12 * filters, out_channels_left=4 * filters, # 12, 4 in_channels_right=12 * filters, out_channels_right=4 * filters ) # 12, 4 self.cell_12 = FirstCell( in_channels_left=12 * filters, out_channels_left=2 * filters, # 12, 2 in_channels_right=16 * filters, out_channels_right=4 * filters ) # 16, 4 self.cell_13 = NormalCell( in_channels_left=16 * filters, out_channels_left=4 * filters, # 16, 4 in_channels_right=24 * filters, out_channels_right=4 * filters ) # 24, 4 self.cell_14 = NormalCell( in_channels_left=24 * filters, out_channels_left=4 * filters, # 24, 4 in_channels_right=24 * filters, out_channels_right=4 * filters ) # 24, 4 self.cell_15 = NormalCell( in_channels_left=24 * filters, out_channels_left=4 * filters, # 24, 4 in_channels_right=24 * filters, out_channels_right=4 * filters ) # 24, 4 self.relu = nn.ReLU() self.dropout = nn.Dropout() self.classifier = nn.Linear(24 * filters, num_classes) self._init_params() def _init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu' ) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): 
nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) def features(self, input): x_conv0 = self.conv0(input) x_stem_0 = self.cell_stem_0(x_conv0) x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0) x_cell_0 = self.cell_0(x_stem_1, x_stem_0) x_cell_1 = self.cell_1(x_cell_0, x_stem_1) x_cell_2 = self.cell_2(x_cell_1, x_cell_0) x_cell_3 = self.cell_3(x_cell_2, x_cell_1) x_reduction_cell_0 = self.reduction_cell_0(x_cell_3, x_cell_2) x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_3) x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0) x_cell_8 = self.cell_8(x_cell_7, x_cell_6) x_cell_9 = self.cell_9(x_cell_8, x_cell_7) x_reduction_cell_1 = self.reduction_cell_1(x_cell_9, x_cell_8) x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_9) x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1) x_cell_14 = self.cell_14(x_cell_13, x_cell_12) x_cell_15 = self.cell_15(x_cell_14, x_cell_13) x_cell_15 = self.relu(x_cell_15) x_cell_15 = F.avg_pool2d( x_cell_15, x_cell_15.size()[2:] ) # global average pool x_cell_15 = x_cell_15.view(x_cell_15.size(0), -1) x_cell_15 = self.dropout(x_cell_15) return x_cell_15 def forward(self, input): v = self.features(input) if not self.training: return v y = self.classifier(v) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, v else: raise KeyError('Unsupported loss: {}'.format(self.loss)) def init_pretrained_weights(model, model_url): """Initializes model with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. """ pretrain_dict = model_zoo.load_url(model_url) model_dict = model.state_dict() pretrain_dict = { k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size() } model_dict.update(pretrain_dict) model.load_state_dict(model_dict) def nasnetamobile(num_classes, loss='softmax', pretrained=True, **kwargs): model = NASNetAMobile(num_classes, loss, **kwargs) if pretrained: model_url = pretrained_settings['nasnetamobile']['imagenet']['url'] init_pretrained_weights(model, model_url) return model ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/osnet.py ================================================ from __future__ import division, absolute_import import warnings import torch from torch import nn from torch.nn import functional as F __all__ = [ 'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', 'osnet_ibn_x1_0' ] pretrained_urls = { 'osnet_x1_0': 'https://drive.google.com/uc?id=1LaG1EJpHrxdAxKnSCJ_i0u-nbxSAeiFY', 'osnet_x0_75': 'https://drive.google.com/uc?id=1uwA9fElHOk3ZogwbeY5GkLI6QPTX70Hq', 'osnet_x0_5': 'https://drive.google.com/uc?id=16DGLbZukvVYgINws8u8deSaOqjybZ83i', 'osnet_x0_25': 'https://drive.google.com/uc?id=1rb8UN5ZzPKRc_xvtHlyDh-cSz88YX9hs', 'osnet_ibn_x1_0': 'https://drive.google.com/uc?id=1sr90V6irlYYDd4_4ISU2iruoRG8J__6l' } ########## # Basic layers ########## class ConvLayer(nn.Module): """Convolution layer (conv + bn + relu).""" def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, IN=False ): super(ConvLayer, self).__init__() self.conv = nn.Conv2d( in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=False, groups=groups ) if IN: self.bn = nn.InstanceNorm2d(out_channels, affine=True) else: self.bn = 
nn.BatchNorm2d(out_channels) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.bn(x) x = self.relu(x) return x class Conv1x1(nn.Module): """1x1 convolution + bn + relu.""" def __init__(self, in_channels, out_channels, stride=1, groups=1): super(Conv1x1, self).__init__() self.conv = nn.Conv2d( in_channels, out_channels, 1, stride=stride, padding=0, bias=False, groups=groups ) self.bn = nn.BatchNorm2d(out_channels) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.bn(x) x = self.relu(x) return x class Conv1x1Linear(nn.Module): """1x1 convolution + bn (w/o non-linearity).""" def __init__(self, in_channels, out_channels, stride=1): super(Conv1x1Linear, self).__init__() self.conv = nn.Conv2d( in_channels, out_channels, 1, stride=stride, padding=0, bias=False ) self.bn = nn.BatchNorm2d(out_channels) def forward(self, x): x = self.conv(x) x = self.bn(x) return x class Conv3x3(nn.Module): """3x3 convolution + bn + relu.""" def __init__(self, in_channels, out_channels, stride=1, groups=1): super(Conv3x3, self).__init__() self.conv = nn.Conv2d( in_channels, out_channels, 3, stride=stride, padding=1, bias=False, groups=groups ) self.bn = nn.BatchNorm2d(out_channels) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.bn(x) x = self.relu(x) return x class LightConv3x3(nn.Module): """Lightweight 3x3 convolution. 1x1 (linear) + dw 3x3 (nonlinear). """ def __init__(self, in_channels, out_channels): super(LightConv3x3, self).__init__() self.conv1 = nn.Conv2d( in_channels, out_channels, 1, stride=1, padding=0, bias=False ) self.conv2 = nn.Conv2d( out_channels, out_channels, 3, stride=1, padding=1, bias=False, groups=out_channels ) self.bn = nn.BatchNorm2d(out_channels) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.bn(x) x = self.relu(x) return x ########## # Building blocks for omni-scale feature learning ########## class ChannelGate(nn.Module): """A mini-network that generates channel-wise gates conditioned on input tensor.""" def __init__( self, in_channels, num_gates=None, return_gates=False, gate_activation='sigmoid', reduction=16, layer_norm=False ): super(ChannelGate, self).__init__() if num_gates is None: num_gates = in_channels self.return_gates = return_gates self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.fc1 = nn.Conv2d( in_channels, in_channels // reduction, kernel_size=1, bias=True, padding=0 ) self.norm1 = None if layer_norm: self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1)) self.relu = nn.ReLU(inplace=True) self.fc2 = nn.Conv2d( in_channels // reduction, num_gates, kernel_size=1, bias=True, padding=0 ) if gate_activation == 'sigmoid': self.gate_activation = nn.Sigmoid() elif gate_activation == 'relu': self.gate_activation = nn.ReLU(inplace=True) elif gate_activation == 'linear': self.gate_activation = None else: raise RuntimeError( "Unknown gate activation: {}".format(gate_activation) ) def forward(self, x): input = x x = self.global_avgpool(x) x = self.fc1(x) if self.norm1 is not None: x = self.norm1(x) x = self.relu(x) x = self.fc2(x) if self.gate_activation is not None: x = self.gate_activation(x) if self.return_gates: return x return input * x class OSBlock(nn.Module): """Omni-scale feature learning block.""" def __init__( self, in_channels, out_channels, IN=False, bottleneck_reduction=4, **kwargs ): super(OSBlock, self).__init__() mid_channels = out_channels // bottleneck_reduction self.conv1 = Conv1x1(in_channels, 
mid_channels) self.conv2a = LightConv3x3(mid_channels, mid_channels) self.conv2b = nn.Sequential( LightConv3x3(mid_channels, mid_channels), LightConv3x3(mid_channels, mid_channels), ) self.conv2c = nn.Sequential( LightConv3x3(mid_channels, mid_channels), LightConv3x3(mid_channels, mid_channels), LightConv3x3(mid_channels, mid_channels), ) self.conv2d = nn.Sequential( LightConv3x3(mid_channels, mid_channels), LightConv3x3(mid_channels, mid_channels), LightConv3x3(mid_channels, mid_channels), LightConv3x3(mid_channels, mid_channels), ) self.gate = ChannelGate(mid_channels) self.conv3 = Conv1x1Linear(mid_channels, out_channels) self.downsample = None if in_channels != out_channels: self.downsample = Conv1x1Linear(in_channels, out_channels) self.IN = None if IN: self.IN = nn.InstanceNorm2d(out_channels, affine=True) def forward(self, x): identity = x x1 = self.conv1(x) x2a = self.conv2a(x1) x2b = self.conv2b(x1) x2c = self.conv2c(x1) x2d = self.conv2d(x1) x2 = self.gate(x2a) + self.gate(x2b) + self.gate(x2c) + self.gate(x2d) x3 = self.conv3(x2) if self.downsample is not None: identity = self.downsample(identity) out = x3 + identity if self.IN is not None: out = self.IN(out) return F.relu(out) ########## # Network architecture ########## class OSNet(nn.Module): """Omni-Scale Network. Reference: - Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019. - Zhou et al. Learning Generalisable Omni-Scale Representations for Person Re-Identification. TPAMI, 2021. """ def __init__( self, num_classes, blocks, layers, channels, feature_dim=512, loss='softmax', IN=False, **kwargs ): super(OSNet, self).__init__() num_blocks = len(blocks) assert num_blocks == len(layers) assert num_blocks == len(channels) - 1 self.loss = loss self.feature_dim = feature_dim # convolutional backbone self.conv1 = ConvLayer(3, channels[0], 7, stride=2, padding=3, IN=IN) self.maxpool = nn.MaxPool2d(3, stride=2, padding=1) self.conv2 = self._make_layer( blocks[0], layers[0], channels[0], channels[1], reduce_spatial_size=True, IN=IN ) self.conv3 = self._make_layer( blocks[1], layers[1], channels[1], channels[2], reduce_spatial_size=True ) self.conv4 = self._make_layer( blocks[2], layers[2], channels[2], channels[3], reduce_spatial_size=False ) self.conv5 = Conv1x1(channels[3], channels[3]) self.global_avgpool = nn.AdaptiveAvgPool2d(1) # fully connected layer self.fc = self._construct_fc_layer( self.feature_dim, channels[3], dropout_p=None ) # identity classification layer self.classifier = nn.Linear(self.feature_dim, num_classes) self._init_params() def _make_layer( self, block, layer, in_channels, out_channels, reduce_spatial_size, IN=False ): layers = [] layers.append(block(in_channels, out_channels, IN=IN)) for i in range(1, layer): layers.append(block(out_channels, out_channels, IN=IN)) if reduce_spatial_size: layers.append( nn.Sequential( Conv1x1(out_channels, out_channels), nn.AvgPool2d(2, stride=2) ) ) return nn.Sequential(*layers) def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): if fc_dims is None or fc_dims < 0: self.feature_dim = input_dim return None if isinstance(fc_dims, int): fc_dims = [fc_dims] layers = [] for dim in fc_dims: layers.append(nn.Linear(input_dim, dim)) layers.append(nn.BatchNorm1d(dim)) layers.append(nn.ReLU(inplace=True)) if dropout_p is not None: layers.append(nn.Dropout(p=dropout_p)) input_dim = dim self.feature_dim = fc_dims[-1] return nn.Sequential(*layers) def _init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): 
nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu' ) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) def featuremaps(self, x): x = self.conv1(x) x = self.maxpool(x) x = self.conv2(x) x = self.conv3(x) x = self.conv4(x) x = self.conv5(x) return x def forward(self, x, return_featuremaps=False): x = self.featuremaps(x) if return_featuremaps: return x v = self.global_avgpool(x) v = v.view(v.size(0), -1) if self.fc is not None: v = self.fc(v) if not self.training: return v y = self.classifier(v) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, v else: raise KeyError("Unsupported loss: {}".format(self.loss)) def init_pretrained_weights(model, key=''): """Initializes model with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. """ import os import errno import gdown from collections import OrderedDict def _get_torch_home(): ENV_TORCH_HOME = 'TORCH_HOME' ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME' DEFAULT_CACHE_DIR = '~/.cache' torch_home = os.path.expanduser( os.getenv( ENV_TORCH_HOME, os.path.join( os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch' ) ) ) return torch_home torch_home = _get_torch_home() model_dir = os.path.join(torch_home, 'checkpoints') try: os.makedirs(model_dir) except OSError as e: if e.errno == errno.EEXIST: # Directory already exists, ignore. pass else: # Unexpected OSError, re-raise. raise filename = key + '_imagenet.pth' cached_file = os.path.join(model_dir, filename) if not os.path.exists(cached_file): gdown.download(pretrained_urls[key], cached_file, quiet=False) state_dict = torch.load(cached_file) model_dict = model.state_dict() new_state_dict = OrderedDict() matched_layers, discarded_layers = [], [] for k, v in state_dict.items(): if k.startswith('module.'): k = k[7:] # discard module. if k in model_dict and model_dict[k].size() == v.size(): new_state_dict[k] = v matched_layers.append(k) else: discarded_layers.append(k) model_dict.update(new_state_dict) model.load_state_dict(model_dict) if len(matched_layers) == 0: warnings.warn( 'The pretrained weights from "{}" cannot be loaded, ' 'please check the key names manually ' '(** ignored and continue **)'.format(cached_file) ) else: print( 'Successfully loaded imagenet pretrained weights from "{}"'. format(cached_file) ) if len(discarded_layers) > 0: print( '** The following layers are discarded ' 'due to unmatched keys or layer size: {}'. 
format(discarded_layers) ) ########## # Instantiation ########## def osnet_x1_0(num_classes=1000, pretrained=True, loss='softmax', **kwargs): # standard size (width x1.0) model = OSNet( num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2], channels=[64, 256, 384, 512], loss=loss, **kwargs ) if pretrained: init_pretrained_weights(model, key='osnet_x1_0') return model def osnet_x0_75(num_classes=1000, pretrained=True, loss='softmax', **kwargs): # medium size (width x0.75) model = OSNet( num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2], channels=[48, 192, 288, 384], loss=loss, **kwargs ) if pretrained: init_pretrained_weights(model, key='osnet_x0_75') return model def osnet_x0_5(num_classes=1000, pretrained=True, loss='softmax', **kwargs): # tiny size (width x0.5) model = OSNet( num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2], channels=[32, 128, 192, 256], loss=loss, **kwargs ) if pretrained: init_pretrained_weights(model, key='osnet_x0_5') return model def osnet_x0_25(num_classes=1000, pretrained=True, loss='softmax', **kwargs): # very tiny size (width x0.25) model = OSNet( num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2], channels=[16, 64, 96, 128], loss=loss, **kwargs ) if pretrained: init_pretrained_weights(model, key='osnet_x0_25') return model def osnet_ibn_x1_0( num_classes=1000, pretrained=True, loss='softmax', **kwargs ): # standard size (width x1.0) + IBN layer # Ref: Pan et al. Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net. ECCV, 2018. model = OSNet( num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2], channels=[64, 256, 384, 512], loss=loss, IN=True, **kwargs ) if pretrained: init_pretrained_weights(model, key='osnet_ibn_x1_0') return model ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/osnet_ain.py ================================================ from __future__ import division, absolute_import import warnings import torch from torch import nn from torch.nn import functional as F __all__ = [ 'osnet_ain_x1_0', 'osnet_ain_x0_75', 'osnet_ain_x0_5', 'osnet_ain_x0_25' ] pretrained_urls = { 'osnet_ain_x1_0': 'https://drive.google.com/uc?id=1-CaioD9NaqbHK_kzSMW8VE4_3KcsRjEo', 'osnet_ain_x0_75': 'https://drive.google.com/uc?id=1apy0hpsMypqstfencdH-jKIUEFOW4xoM', 'osnet_ain_x0_5': 'https://drive.google.com/uc?id=1KusKvEYyKGDTUBVRxRiz55G31wkihB6l', 'osnet_ain_x0_25': 'https://drive.google.com/uc?id=1SxQt2AvmEcgWNhaRb2xC4rP6ZwVDP0Wt' } ########## # Basic layers ########## class ConvLayer(nn.Module): """Convolution layer (conv + bn + relu).""" def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, IN=False ): super(ConvLayer, self).__init__() self.conv = nn.Conv2d( in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=False, groups=groups ) if IN: self.bn = nn.InstanceNorm2d(out_channels, affine=True) else: self.bn = nn.BatchNorm2d(out_channels) self.relu = nn.ReLU() def forward(self, x): x = self.conv(x) x = self.bn(x) return self.relu(x) class Conv1x1(nn.Module): """1x1 convolution + bn + relu.""" def __init__(self, in_channels, out_channels, stride=1, groups=1): super(Conv1x1, self).__init__() self.conv = nn.Conv2d( in_channels, out_channels, 1, stride=stride, padding=0, bias=False, groups=groups ) self.bn = nn.BatchNorm2d(out_channels) self.relu = nn.ReLU() def forward(self, x): x = self.conv(x) x = self.bn(x) return self.relu(x) class Conv1x1Linear(nn.Module): """1x1 
convolution + bn (w/o non-linearity).""" def __init__(self, in_channels, out_channels, stride=1, bn=True): super(Conv1x1Linear, self).__init__() self.conv = nn.Conv2d( in_channels, out_channels, 1, stride=stride, padding=0, bias=False ) self.bn = None if bn: self.bn = nn.BatchNorm2d(out_channels) def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) return x class Conv3x3(nn.Module): """3x3 convolution + bn + relu.""" def __init__(self, in_channels, out_channels, stride=1, groups=1): super(Conv3x3, self).__init__() self.conv = nn.Conv2d( in_channels, out_channels, 3, stride=stride, padding=1, bias=False, groups=groups ) self.bn = nn.BatchNorm2d(out_channels) self.relu = nn.ReLU() def forward(self, x): x = self.conv(x) x = self.bn(x) return self.relu(x) class LightConv3x3(nn.Module): """Lightweight 3x3 convolution. 1x1 (linear) + dw 3x3 (nonlinear). """ def __init__(self, in_channels, out_channels): super(LightConv3x3, self).__init__() self.conv1 = nn.Conv2d( in_channels, out_channels, 1, stride=1, padding=0, bias=False ) self.conv2 = nn.Conv2d( out_channels, out_channels, 3, stride=1, padding=1, bias=False, groups=out_channels ) self.bn = nn.BatchNorm2d(out_channels) self.relu = nn.ReLU() def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.bn(x) return self.relu(x) class LightConvStream(nn.Module): """Lightweight convolution stream.""" def __init__(self, in_channels, out_channels, depth): super(LightConvStream, self).__init__() assert depth >= 1, 'depth must be equal to or larger than 1, but got {}'.format( depth ) layers = [] layers += [LightConv3x3(in_channels, out_channels)] for i in range(depth - 1): layers += [LightConv3x3(out_channels, out_channels)] self.layers = nn.Sequential(*layers) def forward(self, x): return self.layers(x) ########## # Building blocks for omni-scale feature learning ########## class ChannelGate(nn.Module): """A mini-network that generates channel-wise gates conditioned on input tensor.""" def __init__( self, in_channels, num_gates=None, return_gates=False, gate_activation='sigmoid', reduction=16, layer_norm=False ): super(ChannelGate, self).__init__() if num_gates is None: num_gates = in_channels self.return_gates = return_gates self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.fc1 = nn.Conv2d( in_channels, in_channels // reduction, kernel_size=1, bias=True, padding=0 ) self.norm1 = None if layer_norm: self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1)) self.relu = nn.ReLU() self.fc2 = nn.Conv2d( in_channels // reduction, num_gates, kernel_size=1, bias=True, padding=0 ) if gate_activation == 'sigmoid': self.gate_activation = nn.Sigmoid() elif gate_activation == 'relu': self.gate_activation = nn.ReLU() elif gate_activation == 'linear': self.gate_activation = None else: raise RuntimeError( "Unknown gate activation: {}".format(gate_activation) ) def forward(self, x): input = x x = self.global_avgpool(x) x = self.fc1(x) if self.norm1 is not None: x = self.norm1(x) x = self.relu(x) x = self.fc2(x) if self.gate_activation is not None: x = self.gate_activation(x) if self.return_gates: return x return input * x class OSBlock(nn.Module): """Omni-scale feature learning block.""" def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs): super(OSBlock, self).__init__() assert T >= 1 assert out_channels >= reduction and out_channels % reduction == 0 mid_channels = out_channels // reduction self.conv1 = Conv1x1(in_channels, mid_channels) self.conv2 = nn.ModuleList() for t in range(1, T + 1): self.conv2 += 
[LightConvStream(mid_channels, mid_channels, t)] self.gate = ChannelGate(mid_channels) self.conv3 = Conv1x1Linear(mid_channels, out_channels) self.downsample = None if in_channels != out_channels: self.downsample = Conv1x1Linear(in_channels, out_channels) def forward(self, x): identity = x x1 = self.conv1(x) x2 = 0 for conv2_t in self.conv2: x2_t = conv2_t(x1) x2 = x2 + self.gate(x2_t) x3 = self.conv3(x2) if self.downsample is not None: identity = self.downsample(identity) out = x3 + identity return F.relu(out) class OSBlockINin(nn.Module): """Omni-scale feature learning block with instance normalization.""" def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs): super(OSBlockINin, self).__init__() assert T >= 1 assert out_channels >= reduction and out_channels % reduction == 0 mid_channels = out_channels // reduction self.conv1 = Conv1x1(in_channels, mid_channels) self.conv2 = nn.ModuleList() for t in range(1, T + 1): self.conv2 += [LightConvStream(mid_channels, mid_channels, t)] self.gate = ChannelGate(mid_channels) self.conv3 = Conv1x1Linear(mid_channels, out_channels, bn=False) self.downsample = None if in_channels != out_channels: self.downsample = Conv1x1Linear(in_channels, out_channels) self.IN = nn.InstanceNorm2d(out_channels, affine=True) def forward(self, x): identity = x x1 = self.conv1(x) x2 = 0 for conv2_t in self.conv2: x2_t = conv2_t(x1) x2 = x2 + self.gate(x2_t) x3 = self.conv3(x2) x3 = self.IN(x3) # IN inside residual if self.downsample is not None: identity = self.downsample(identity) out = x3 + identity return F.relu(out) ########## # Network architecture ########## class OSNet(nn.Module): """Omni-Scale Network. Reference: - Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019. - Zhou et al. Learning Generalisable Omni-Scale Representations for Person Re-Identification. TPAMI, 2021. 
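
The OSBlock/OSBlockINin forward above performs the "omni-scale" aggregation: T parallel light-conv streams of depth 1..T are each passed through one shared ChannelGate and summed. A minimal sketch of that aggregation in isolation (dummy sizes; it reuses the ChannelGate and LightConvStream classes defined above):

    import torch

    mid = 64
    gate = ChannelGate(mid)
    streams = [LightConvStream(mid, mid, t) for t in range(1, 5)]
    x1 = torch.randn(2, mid, 16, 8)
    x2 = sum(gate(s(x1)) for s in streams)   # fused features, same shape as x1
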
""" def __init__( self, num_classes, blocks, layers, channels, feature_dim=512, loss='softmax', conv1_IN=False, **kwargs ): super(OSNet, self).__init__() num_blocks = len(blocks) assert num_blocks == len(layers) assert num_blocks == len(channels) - 1 self.loss = loss self.feature_dim = feature_dim # convolutional backbone self.conv1 = ConvLayer( 3, channels[0], 7, stride=2, padding=3, IN=conv1_IN ) self.maxpool = nn.MaxPool2d(3, stride=2, padding=1) self.conv2 = self._make_layer( blocks[0], layers[0], channels[0], channels[1] ) self.pool2 = nn.Sequential( Conv1x1(channels[1], channels[1]), nn.AvgPool2d(2, stride=2) ) self.conv3 = self._make_layer( blocks[1], layers[1], channels[1], channels[2] ) self.pool3 = nn.Sequential( Conv1x1(channels[2], channels[2]), nn.AvgPool2d(2, stride=2) ) self.conv4 = self._make_layer( blocks[2], layers[2], channels[2], channels[3] ) self.conv5 = Conv1x1(channels[3], channels[3]) self.global_avgpool = nn.AdaptiveAvgPool2d(1) # fully connected layer self.fc = self._construct_fc_layer( self.feature_dim, channels[3], dropout_p=None ) # identity classification layer self.classifier = nn.Linear(self.feature_dim, num_classes) self._init_params() def _make_layer(self, blocks, layer, in_channels, out_channels): layers = [] layers += [blocks[0](in_channels, out_channels)] for i in range(1, len(blocks)): layers += [blocks[i](out_channels, out_channels)] return nn.Sequential(*layers) def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): if fc_dims is None or fc_dims < 0: self.feature_dim = input_dim return None if isinstance(fc_dims, int): fc_dims = [fc_dims] layers = [] for dim in fc_dims: layers.append(nn.Linear(input_dim, dim)) layers.append(nn.BatchNorm1d(dim)) layers.append(nn.ReLU()) if dropout_p is not None: layers.append(nn.Dropout(p=dropout_p)) input_dim = dim self.feature_dim = fc_dims[-1] return nn.Sequential(*layers) def _init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu' ) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.InstanceNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) def featuremaps(self, x): x = self.conv1(x) x = self.maxpool(x) x = self.conv2(x) x = self.pool2(x) x = self.conv3(x) x = self.pool3(x) x = self.conv4(x) x = self.conv5(x) return x def forward(self, x, return_featuremaps=False): x = self.featuremaps(x) if return_featuremaps: return x v = self.global_avgpool(x) v = v.view(v.size(0), -1) if self.fc is not None: v = self.fc(v) if not self.training: return v y = self.classifier(v) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, v else: raise KeyError("Unsupported loss: {}".format(self.loss)) def init_pretrained_weights(model, key=''): """Initializes model with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. 
""" import os import errno import gdown from collections import OrderedDict def _get_torch_home(): ENV_TORCH_HOME = 'TORCH_HOME' ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME' DEFAULT_CACHE_DIR = '~/.cache' torch_home = os.path.expanduser( os.getenv( ENV_TORCH_HOME, os.path.join( os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch' ) ) ) return torch_home torch_home = _get_torch_home() model_dir = os.path.join(torch_home, 'checkpoints') try: os.makedirs(model_dir) except OSError as e: if e.errno == errno.EEXIST: # Directory already exists, ignore. pass else: # Unexpected OSError, re-raise. raise filename = key + '_imagenet.pth' cached_file = os.path.join(model_dir, filename) if not os.path.exists(cached_file): gdown.download(pretrained_urls[key], cached_file, quiet=False) state_dict = torch.load(cached_file) model_dict = model.state_dict() new_state_dict = OrderedDict() matched_layers, discarded_layers = [], [] for k, v in state_dict.items(): if k.startswith('module.'): k = k[7:] # discard module. if k in model_dict and model_dict[k].size() == v.size(): new_state_dict[k] = v matched_layers.append(k) else: discarded_layers.append(k) model_dict.update(new_state_dict) model.load_state_dict(model_dict) if len(matched_layers) == 0: warnings.warn( 'The pretrained weights from "{}" cannot be loaded, ' 'please check the key names manually ' '(** ignored and continue **)'.format(cached_file) ) else: print( 'Successfully loaded imagenet pretrained weights from "{}"'. format(cached_file) ) if len(discarded_layers) > 0: print( '** The following layers are discarded ' 'due to unmatched keys or layer size: {}'. format(discarded_layers) ) ########## # Instantiation ########## def osnet_ain_x1_0( num_classes=1000, pretrained=True, loss='softmax', **kwargs ): model = OSNet( num_classes, blocks=[ [OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin], [OSBlockINin, OSBlock] ], layers=[2, 2, 2], channels=[64, 256, 384, 512], loss=loss, conv1_IN=True, **kwargs ) if pretrained: init_pretrained_weights(model, key='osnet_ain_x1_0') return model def osnet_ain_x0_75( num_classes=1000, pretrained=True, loss='softmax', **kwargs ): model = OSNet( num_classes, blocks=[ [OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin], [OSBlockINin, OSBlock] ], layers=[2, 2, 2], channels=[48, 192, 288, 384], loss=loss, conv1_IN=True, **kwargs ) if pretrained: init_pretrained_weights(model, key='osnet_ain_x0_75') return model def osnet_ain_x0_5( num_classes=1000, pretrained=True, loss='softmax', **kwargs ): model = OSNet( num_classes, blocks=[ [OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin], [OSBlockINin, OSBlock] ], layers=[2, 2, 2], channels=[32, 128, 192, 256], loss=loss, conv1_IN=True, **kwargs ) if pretrained: init_pretrained_weights(model, key='osnet_ain_x0_5') return model def osnet_ain_x0_25( num_classes=1000, pretrained=True, loss='softmax', **kwargs ): model = OSNet( num_classes, blocks=[ [OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin], [OSBlockINin, OSBlock] ], layers=[2, 2, 2], channels=[16, 64, 96, 128], loss=loss, conv1_IN=True, **kwargs ) if pretrained: init_pretrained_weights(model, key='osnet_ain_x0_25') return model ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/pcb.py ================================================ from __future__ import division, absolute_import import torch.utils.model_zoo as model_zoo from torch import nn from torch.nn import functional as F __all__ = ['pcb_p6', 'pcb_p4'] model_urls = { 'resnet18': 
'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d( in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False ) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d( planes, planes, kernel_size=3, stride=stride, padding=1, bias=False ) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d( planes, planes * self.expansion, kernel_size=1, bias=False ) self.bn3 = nn.BatchNorm2d(planes * self.expansion) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class DimReduceLayer(nn.Module): def __init__(self, in_channels, out_channels, nonlinear): super(DimReduceLayer, self).__init__() layers = [] layers.append( nn.Conv2d( in_channels, out_channels, 1, stride=1, padding=0, bias=False ) ) layers.append(nn.BatchNorm2d(out_channels)) if nonlinear == 'relu': layers.append(nn.ReLU(inplace=True)) elif nonlinear == 'leakyrelu': layers.append(nn.LeakyReLU(0.1)) self.layers = nn.Sequential(*layers) def forward(self, x): return self.layers(x) class PCB(nn.Module): """Part-based Convolutional Baseline. Reference: Sun et al. Beyond Part Models: Person Retrieval with Refined Part Pooling (and A Strong Convolutional Baseline). ECCV 2018. Public keys: - ``pcb_p4``: PCB with 4-part strips. - ``pcb_p6``: PCB with 6-part strips. 
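
PCB's core move, implemented in forward() below, is to average-pool the final feature map into `parts` horizontal strips and attach one classifier per strip. A shape sketch (dummy tensor; the 24x8 map corresponds to a 384x128 crop through the stride-16 ResNet backbone built below):

    import torch
    import torch.nn as nn

    f = torch.randn(4, 2048, 24, 8)          # final map (Bottleneck, expansion 4)
    v_g = nn.AdaptiveAvgPool2d((6, 1))(f)    # 6 strips as in pcb_p6 -> (4, 2048, 6, 1)
    strip0 = v_g[:, :, 0, :].flatten(1)      # (4, 2048), the input to classifier[0]
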
""" def __init__( self, num_classes, loss, block, layers, parts=6, reduced_dim=256, nonlinear='relu', **kwargs ): self.inplanes = 64 super(PCB, self).__init__() self.loss = loss self.parts = parts self.feature_dim = 512 * block.expansion # backbone network self.conv1 = nn.Conv2d( 3, 64, kernel_size=7, stride=2, padding=3, bias=False ) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=1) # pcb layers self.parts_avgpool = nn.AdaptiveAvgPool2d((self.parts, 1)) self.dropout = nn.Dropout(p=0.5) self.conv5 = DimReduceLayer( 512 * block.expansion, reduced_dim, nonlinear=nonlinear ) self.feature_dim = reduced_dim self.classifier = nn.ModuleList( [ nn.Linear(self.feature_dim, num_classes) for _ in range(self.parts) ] ) self._init_params() def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d( self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False ), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def _init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu' ) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) def featuremaps(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x def forward(self, x): f = self.featuremaps(x) v_g = self.parts_avgpool(f) if not self.training: v_g = F.normalize(v_g, p=2, dim=1) return v_g.view(v_g.size(0), -1) v_g = self.dropout(v_g) v_h = self.conv5(v_g) y = [] for i in range(self.parts): v_h_i = v_h[:, :, i, :] v_h_i = v_h_i.view(v_h_i.size(0), -1) y_i = self.classifier[i](v_h_i) y.append(y_i) if self.loss == 'softmax': return y elif self.loss == 'triplet': v_g = F.normalize(v_g, p=2, dim=1) return y, v_g.view(v_g.size(0), -1) else: raise KeyError('Unsupported loss: {}'.format(self.loss)) def init_pretrained_weights(model, model_url): """Initializes model with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. 
""" pretrain_dict = model_zoo.load_url(model_url) model_dict = model.state_dict() pretrain_dict = { k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size() } model_dict.update(pretrain_dict) model.load_state_dict(model_dict) def pcb_p6(num_classes, loss='softmax', pretrained=True, **kwargs): model = PCB( num_classes=num_classes, loss=loss, block=Bottleneck, layers=[3, 4, 6, 3], last_stride=1, parts=6, reduced_dim=256, nonlinear='relu', **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['resnet50']) return model def pcb_p4(num_classes, loss='softmax', pretrained=True, **kwargs): model = PCB( num_classes=num_classes, loss=loss, block=Bottleneck, layers=[3, 4, 6, 3], last_stride=1, parts=4, reduced_dim=256, nonlinear='relu', **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['resnet50']) return model ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/resnet.py ================================================ """ Code source: https://github.com/pytorch/vision """ from __future__ import division, absolute_import import torch.utils.model_zoo as model_zoo from torch import nn __all__ = [ 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'resnet50_fc512' ] model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', } def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): """3x3 convolution with padding""" return nn.Conv2d( in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation ) def conv1x1(in_planes, out_planes, stride=1): """1x1 convolution""" return nn.Conv2d( in_planes, out_planes, kernel_size=1, stride=stride, bias=False ) class BasicBlock(nn.Module): expansion = 1 def __init__( self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None ): super(BasicBlock, self).__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d if groups != 1 or base_width != 64: raise ValueError( 'BasicBlock only supports groups=1 and base_width=64' ) if dilation > 1: raise NotImplementedError( "Dilation > 1 not supported in BasicBlock" ) # Both self.conv1 and self.downsample layers downsample the input when stride != 1 self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = norm_layer(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = norm_layer(planes) self.downsample = downsample self.stride = stride def forward(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__( self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None ): super(Bottleneck, self).__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d 
width = int(planes * (base_width/64.)) * groups # Both self.conv2 and self.downsample layers downsample the input when stride != 1 self.conv1 = conv1x1(inplanes, width) self.bn1 = norm_layer(width) self.conv2 = conv3x3(width, width, stride, groups, dilation) self.bn2 = norm_layer(width) self.conv3 = conv1x1(width, planes * self.expansion) self.bn3 = norm_layer(planes * self.expansion) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu(out) return out class ResNet(nn.Module): """Residual network. Reference: - He et al. Deep Residual Learning for Image Recognition. CVPR 2016. - Xie et al. Aggregated Residual Transformations for Deep Neural Networks. CVPR 2017. Public keys: - ``resnet18``: ResNet18. - ``resnet34``: ResNet34. - ``resnet50``: ResNet50. - ``resnet101``: ResNet101. - ``resnet152``: ResNet152. - ``resnext50_32x4d``: ResNeXt50. - ``resnext101_32x8d``: ResNeXt101. - ``resnet50_fc512``: ResNet50 + FC. """ def __init__( self, num_classes, loss, block, layers, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None, last_stride=2, fc_dims=None, dropout_p=None, **kwargs ): super(ResNet, self).__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d self._norm_layer = norm_layer self.loss = loss self.feature_dim = 512 * block.expansion self.inplanes = 64 self.dilation = 1 if replace_stride_with_dilation is None: # each element in the tuple indicates if we should replace # the 2x2 stride with a dilated convolution instead replace_stride_with_dilation = [False, False, False] if len(replace_stride_with_dilation) != 3: raise ValueError( "replace_stride_with_dilation should be None " "or a 3-element tuple, got {}". format(replace_stride_with_dilation) ) self.groups = groups self.base_width = width_per_group self.conv1 = nn.Conv2d( 3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False ) self.bn1 = norm_layer(self.inplanes) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer( block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0] ) self.layer3 = self._make_layer( block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1] ) self.layer4 = self._make_layer( block, 512, layers[3], stride=last_stride, dilate=replace_stride_with_dilation[2] ) self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = self._construct_fc_layer( fc_dims, 512 * block.expansion, dropout_p ) self.classifier = nn.Linear(self.feature_dim, num_classes) self._init_params() # Zero-initialize the last BN in each residual branch, # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 if zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck): nn.init.constant_(m.bn3.weight, 0) elif isinstance(m, BasicBlock): nn.init.constant_(m.bn2.weight, 0) def _make_layer(self, block, planes, blocks, stride=1, dilate=False): norm_layer = self._norm_layer downsample = None previous_dilation = self.dilation if dilate: self.dilation *= stride stride = 1 if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( conv1x1(self.inplanes, planes * block.expansion, stride), norm_layer(planes * block.expansion), ) layers = [] layers.append( block( self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer ) ) self.inplanes = planes * block.expansion for _ in range(1, blocks): layers.append( block( self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer ) ) return nn.Sequential(*layers) def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): """Constructs fully connected layer Args: fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed input_dim (int): input dimension dropout_p (float): dropout probability, if None, dropout is unused """ if fc_dims is None: self.feature_dim = input_dim return None assert isinstance( fc_dims, (list, tuple) ), 'fc_dims must be either list or tuple, but got {}'.format( type(fc_dims) ) layers = [] for dim in fc_dims: layers.append(nn.Linear(input_dim, dim)) layers.append(nn.BatchNorm1d(dim)) layers.append(nn.ReLU(inplace=True)) if dropout_p is not None: layers.append(nn.Dropout(p=dropout_p)) input_dim = dim self.feature_dim = fc_dims[-1] return nn.Sequential(*layers) def _init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu' ) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) def featuremaps(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x def forward(self, x): f = self.featuremaps(x) v = self.global_avgpool(f) v = v.view(v.size(0), -1) if self.fc is not None: v = self.fc(v) if not self.training: return v y = self.classifier(v) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, v else: raise KeyError("Unsupported loss: {}".format(self.loss)) def init_pretrained_weights(model, model_url): """Initializes model with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. 
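For example (sketch)::

    model = resnet50_fc512(num_classes=1000, pretrained=False)
    init_pretrained_weights(model, model_urls['resnet50'])
    # The convolutional backbone receives ImageNet weights; the extra
    # fc/BatchNorm1d stack added by fc_dims=[512] and the classifier
    # have no counterparts in the checkpoint and stay randomly
    # initialized.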
""" pretrain_dict = model_zoo.load_url(model_url) model_dict = model.state_dict() pretrain_dict = { k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size() } model_dict.update(pretrain_dict) model.load_state_dict(model_dict) """ResNet""" def resnet18(num_classes, loss='softmax', pretrained=True, **kwargs): model = ResNet( num_classes=num_classes, loss=loss, block=BasicBlock, layers=[2, 2, 2, 2], last_stride=2, fc_dims=None, dropout_p=None, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['resnet18']) return model def resnet34(num_classes, loss='softmax', pretrained=True, **kwargs): model = ResNet( num_classes=num_classes, loss=loss, block=BasicBlock, layers=[3, 4, 6, 3], last_stride=2, fc_dims=None, dropout_p=None, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['resnet34']) return model def resnet50(num_classes, loss='softmax', pretrained=True, **kwargs): model = ResNet( num_classes=num_classes, loss=loss, block=Bottleneck, layers=[3, 4, 6, 3], last_stride=2, fc_dims=None, dropout_p=None, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['resnet50']) return model def resnet101(num_classes, loss='softmax', pretrained=True, **kwargs): model = ResNet( num_classes=num_classes, loss=loss, block=Bottleneck, layers=[3, 4, 23, 3], last_stride=2, fc_dims=None, dropout_p=None, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['resnet101']) return model def resnet152(num_classes, loss='softmax', pretrained=True, **kwargs): model = ResNet( num_classes=num_classes, loss=loss, block=Bottleneck, layers=[3, 8, 36, 3], last_stride=2, fc_dims=None, dropout_p=None, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['resnet152']) return model """ResNeXt""" def resnext50_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs): model = ResNet( num_classes=num_classes, loss=loss, block=Bottleneck, layers=[3, 4, 6, 3], last_stride=2, fc_dims=None, dropout_p=None, groups=32, width_per_group=4, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['resnext50_32x4d']) return model def resnext101_32x8d(num_classes, loss='softmax', pretrained=True, **kwargs): model = ResNet( num_classes=num_classes, loss=loss, block=Bottleneck, layers=[3, 4, 23, 3], last_stride=2, fc_dims=None, dropout_p=None, groups=32, width_per_group=8, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['resnext101_32x8d']) return model """ ResNet + FC """ def resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs): model = ResNet( num_classes=num_classes, loss=loss, block=Bottleneck, layers=[3, 4, 6, 3], last_stride=1, fc_dims=[512], dropout_p=None, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['resnet50']) return model ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/resnet_ibn_a.py ================================================ """ Credit to https://github.com/XingangPan/IBN-Net. 
""" from __future__ import division, absolute_import import math import torch import torch.nn as nn import torch.utils.model_zoo as model_zoo __all__ = ['resnet50_ibn_a'] model_urls = { 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1): "3x3 convolution with padding" return nn.Conv2d( in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False ) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class IBN(nn.Module): def __init__(self, planes): super(IBN, self).__init__() half1 = int(planes / 2) self.half = half1 half2 = planes - half1 self.IN = nn.InstanceNorm2d(half1, affine=True) self.BN = nn.BatchNorm2d(half2) def forward(self, x): split = torch.split(x, self.half, 1) out1 = self.IN(split[0].contiguous()) out2 = self.BN(split[1].contiguous()) out = torch.cat((out1, out2), 1) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, ibn=False, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) if ibn: self.bn1 = IBN(planes) else: self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d( planes, planes, kernel_size=3, stride=stride, padding=1, bias=False ) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d( planes, planes * self.expansion, kernel_size=1, bias=False ) self.bn3 = nn.BatchNorm2d(planes * self.expansion) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class ResNet(nn.Module): """Residual network + IBN layer. Reference: - He et al. Deep Residual Learning for Image Recognition. CVPR 2016. - Pan et al. Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net. ECCV 2018. 
""" def __init__( self, block, layers, num_classes=1000, loss='softmax', fc_dims=None, dropout_p=None, **kwargs ): scale = 64 self.inplanes = scale super(ResNet, self).__init__() self.loss = loss self.feature_dim = scale * 8 * block.expansion self.conv1 = nn.Conv2d( 3, scale, kernel_size=7, stride=2, padding=3, bias=False ) self.bn1 = nn.BatchNorm2d(scale) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, scale, layers[0]) self.layer2 = self._make_layer(block, scale * 2, layers[1], stride=2) self.layer3 = self._make_layer(block, scale * 4, layers[2], stride=2) self.layer4 = self._make_layer(block, scale * 8, layers[3], stride=2) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = self._construct_fc_layer( fc_dims, scale * 8 * block.expansion, dropout_p ) self.classifier = nn.Linear(self.feature_dim, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.InstanceNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d( self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False ), nn.BatchNorm2d(planes * block.expansion), ) layers = [] ibn = True if planes == 512: ibn = False layers.append(block(self.inplanes, planes, ibn, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, ibn)) return nn.Sequential(*layers) def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): """Constructs fully connected layer Args: fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed input_dim (int): input dimension dropout_p (float): dropout probability, if None, dropout is unused """ if fc_dims is None: self.feature_dim = input_dim return None assert isinstance( fc_dims, (list, tuple) ), 'fc_dims must be either list or tuple, but got {}'.format( type(fc_dims) ) layers = [] for dim in fc_dims: layers.append(nn.Linear(input_dim, dim)) layers.append(nn.BatchNorm1d(dim)) layers.append(nn.ReLU(inplace=True)) if dropout_p is not None: layers.append(nn.Dropout(p=dropout_p)) input_dim = dim self.feature_dim = fc_dims[-1] return nn.Sequential(*layers) def featuremaps(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x def forward(self, x): f = self.featuremaps(x) v = self.avgpool(f) v = v.view(v.size(0), -1) if self.fc is not None: v = self.fc(v) if not self.training: return v y = self.classifier(v) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, v else: raise KeyError("Unsupported loss: {}".format(self.loss)) def init_pretrained_weights(model, model_url): """Initializes model with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. 
""" pretrain_dict = model_zoo.load_url(model_url) model_dict = model.state_dict() pretrain_dict = { k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size() } model_dict.update(pretrain_dict) model.load_state_dict(model_dict) def resnet50_ibn_a(num_classes, loss='softmax', pretrained=False, **kwargs): model = ResNet( Bottleneck, [3, 4, 6, 3], num_classes=num_classes, loss=loss, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['resnet50']) return model ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/resnet_ibn_b.py ================================================ """ Credit to https://github.com/XingangPan/IBN-Net. """ from __future__ import division, absolute_import import math import torch.nn as nn import torch.utils.model_zoo as model_zoo __all__ = ['resnet50_ibn_b'] model_urls = { 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1): "3x3 convolution with padding" return nn.Conv2d( in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False ) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None, IN=False): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d( planes, planes, kernel_size=3, stride=stride, padding=1, bias=False ) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d( planes, planes * self.expansion, kernel_size=1, bias=False ) self.bn3 = nn.BatchNorm2d(planes * self.expansion) self.IN = None if IN: self.IN = nn.InstanceNorm2d(planes * 4, affine=True) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual if self.IN is not None: out = self.IN(out) out = self.relu(out) return out class ResNet(nn.Module): """Residual network + IBN layer. Reference: - He et al. Deep Residual Learning for Image Recognition. CVPR 2016. - Pan et al. Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net. ECCV 2018. 
""" def __init__( self, block, layers, num_classes=1000, loss='softmax', fc_dims=None, dropout_p=None, **kwargs ): scale = 64 self.inplanes = scale super(ResNet, self).__init__() self.loss = loss self.feature_dim = scale * 8 * block.expansion self.conv1 = nn.Conv2d( 3, scale, kernel_size=7, stride=2, padding=3, bias=False ) self.bn1 = nn.InstanceNorm2d(scale, affine=True) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer( block, scale, layers[0], stride=1, IN=True ) self.layer2 = self._make_layer( block, scale * 2, layers[1], stride=2, IN=True ) self.layer3 = self._make_layer(block, scale * 4, layers[2], stride=2) self.layer4 = self._make_layer(block, scale * 8, layers[3], stride=2) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = self._construct_fc_layer( fc_dims, scale * 8 * block.expansion, dropout_p ) self.classifier = nn.Linear(self.feature_dim, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.InstanceNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1, IN=False): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d( self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False ), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks - 1): layers.append(block(self.inplanes, planes)) layers.append(block(self.inplanes, planes, IN=IN)) return nn.Sequential(*layers) def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): """Constructs fully connected layer Args: fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed input_dim (int): input dimension dropout_p (float): dropout probability, if None, dropout is unused """ if fc_dims is None: self.feature_dim = input_dim return None assert isinstance( fc_dims, (list, tuple) ), 'fc_dims must be either list or tuple, but got {}'.format( type(fc_dims) ) layers = [] for dim in fc_dims: layers.append(nn.Linear(input_dim, dim)) layers.append(nn.BatchNorm1d(dim)) layers.append(nn.ReLU(inplace=True)) if dropout_p is not None: layers.append(nn.Dropout(p=dropout_p)) input_dim = dim self.feature_dim = fc_dims[-1] return nn.Sequential(*layers) def featuremaps(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x def forward(self, x): f = self.featuremaps(x) v = self.avgpool(f) v = v.view(v.size(0), -1) if self.fc is not None: v = self.fc(v) if not self.training: return v y = self.classifier(v) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, v else: raise KeyError("Unsupported loss: {}".format(self.loss)) def init_pretrained_weights(model, model_url): """Initializes model with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. 
""" pretrain_dict = model_zoo.load_url(model_url) model_dict = model.state_dict() pretrain_dict = { k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size() } model_dict.update(pretrain_dict) model.load_state_dict(model_dict) def resnet50_ibn_b(num_classes, loss='softmax', pretrained=False, **kwargs): model = ResNet( Bottleneck, [3, 4, 6, 3], num_classes=num_classes, loss=loss, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['resnet50']) return model ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/resnetmid.py ================================================ from __future__ import division, absolute_import import torch import torch.utils.model_zoo as model_zoo from torch import nn __all__ = ['resnet50mid'] model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d( in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False ) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d( planes, planes, kernel_size=3, stride=stride, padding=1, bias=False ) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d( planes, planes * self.expansion, kernel_size=1, bias=False ) self.bn3 = nn.BatchNorm2d(planes * self.expansion) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class ResNetMid(nn.Module): """Residual network + mid-level features. Reference: Yu et al. The Devil is in the Middle: Exploiting Mid-level Representations for Cross-Domain Instance Matching. arXiv:1711.08106. Public keys: - ``resnet50mid``: ResNet50 + mid-level feature fusion. 
""" def __init__( self, num_classes, loss, block, layers, last_stride=2, fc_dims=None, **kwargs ): self.inplanes = 64 super(ResNetMid, self).__init__() self.loss = loss self.feature_dim = 512 * block.expansion # backbone network self.conv1 = nn.Conv2d( 3, 64, kernel_size=7, stride=2, padding=3, bias=False ) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer( block, 512, layers[3], stride=last_stride ) self.global_avgpool = nn.AdaptiveAvgPool2d(1) assert fc_dims is not None self.fc_fusion = self._construct_fc_layer( fc_dims, 512 * block.expansion * 2 ) self.feature_dim += 512 * block.expansion self.classifier = nn.Linear(self.feature_dim, num_classes) self._init_params() def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d( self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False ), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): """Constructs fully connected layer Args: fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed input_dim (int): input dimension dropout_p (float): dropout probability, if None, dropout is unused """ if fc_dims is None: self.feature_dim = input_dim return None assert isinstance( fc_dims, (list, tuple) ), 'fc_dims must be either list or tuple, but got {}'.format( type(fc_dims) ) layers = [] for dim in fc_dims: layers.append(nn.Linear(input_dim, dim)) layers.append(nn.BatchNorm1d(dim)) layers.append(nn.ReLU(inplace=True)) if dropout_p is not None: layers.append(nn.Dropout(p=dropout_p)) input_dim = dim self.feature_dim = fc_dims[-1] return nn.Sequential(*layers) def _init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu' ) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) def featuremaps(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x4a = self.layer4[0](x) x4b = self.layer4[1](x4a) x4c = self.layer4[2](x4b) return x4a, x4b, x4c def forward(self, x): x4a, x4b, x4c = self.featuremaps(x) v4a = self.global_avgpool(x4a) v4b = self.global_avgpool(x4b) v4c = self.global_avgpool(x4c) v4ab = torch.cat([v4a, v4b], 1) v4ab = v4ab.view(v4ab.size(0), -1) v4ab = self.fc_fusion(v4ab) v4c = v4c.view(v4c.size(0), -1) v = torch.cat([v4ab, v4c], 1) if not self.training: return v y = self.classifier(v) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, v else: raise KeyError('Unsupported loss: {}'.format(self.loss)) def 
init_pretrained_weights(model, model_url): """Initializes model with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. """ pretrain_dict = model_zoo.load_url(model_url) model_dict = model.state_dict() pretrain_dict = { k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size() } model_dict.update(pretrain_dict) model.load_state_dict(model_dict) """ Residual network configurations: -- resnet18: block=BasicBlock, layers=[2, 2, 2, 2] resnet34: block=BasicBlock, layers=[3, 4, 6, 3] resnet50: block=Bottleneck, layers=[3, 4, 6, 3] resnet101: block=Bottleneck, layers=[3, 4, 23, 3] resnet152: block=Bottleneck, layers=[3, 8, 36, 3] """ def resnet50mid(num_classes, loss='softmax', pretrained=True, **kwargs): model = ResNetMid( num_classes=num_classes, loss=loss, block=Bottleneck, layers=[3, 4, 6, 3], last_stride=2, fc_dims=[1024], **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['resnet50']) return model ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/senet.py ================================================ from __future__ import division, absolute_import import math from collections import OrderedDict import torch.nn as nn from torch.utils import model_zoo __all__ = [ 'senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152', 'se_resnext50_32x4d', 'se_resnext101_32x4d', 'se_resnet50_fc512' ] """ Code imported from https://github.com/Cadene/pretrained-models.pytorch """ pretrained_settings = { 'senet154': { 'imagenet': { 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth', 'input_space': 'RGB', 'input_size': [3, 224, 224], 'input_range': [0, 1], 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225], 'num_classes': 1000 } }, 'se_resnet50': { 'imagenet': { 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet50-ce0d4300.pth', 'input_space': 'RGB', 'input_size': [3, 224, 224], 'input_range': [0, 1], 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225], 'num_classes': 1000 } }, 'se_resnet101': { 'imagenet': { 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet101-7e38fcc6.pth', 'input_space': 'RGB', 'input_size': [3, 224, 224], 'input_range': [0, 1], 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225], 'num_classes': 1000 } }, 'se_resnet152': { 'imagenet': { 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet152-d17c99b7.pth', 'input_space': 'RGB', 'input_size': [3, 224, 224], 'input_range': [0, 1], 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225], 'num_classes': 1000 } }, 'se_resnext50_32x4d': { 'imagenet': { 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth', 'input_space': 'RGB', 'input_size': [3, 224, 224], 'input_range': [0, 1], 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225], 'num_classes': 1000 } }, 'se_resnext101_32x4d': { 'imagenet': { 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth', 'input_space': 'RGB', 'input_size': [3, 224, 224], 'input_range': [0, 1], 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225], 'num_classes': 1000 } }, } class SEModule(nn.Module): def __init__(self, channels, reduction): super(SEModule, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc1 = nn.Conv2d( channels, channels // reduction, kernel_size=1, padding=0 ) self.relu = nn.ReLU(inplace=True) self.fc2 = nn.Conv2d( channels // reduction, channels, kernel_size=1, padding=0 
) self.sigmoid = nn.Sigmoid() def forward(self, x): module_input = x x = self.avg_pool(x) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.sigmoid(x) return module_input * x class Bottleneck(nn.Module): """ Base class for bottlenecks that implements `forward()` method. """ def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out = self.se_module(out) + residual out = self.relu(out) return out class SEBottleneck(Bottleneck): """ Bottleneck for SENet154. """ expansion = 4 def __init__( self, inplanes, planes, groups, reduction, stride=1, downsample=None ): super(SEBottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes * 2) self.conv2 = nn.Conv2d( planes * 2, planes * 4, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False ) self.bn2 = nn.BatchNorm2d(planes * 4) self.conv3 = nn.Conv2d( planes * 4, planes * 4, kernel_size=1, bias=False ) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNetBottleneck(Bottleneck): """ ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe implementation and uses `stride=stride` in `conv1` and not in `conv2` (the latter is used in the torchvision implementation of ResNet). """ expansion = 4 def __init__( self, inplanes, planes, groups, reduction, stride=1, downsample=None ): super(SEResNetBottleneck, self).__init__() self.conv1 = nn.Conv2d( inplanes, planes, kernel_size=1, bias=False, stride=stride ) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d( planes, planes, kernel_size=3, padding=1, groups=groups, bias=False ) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNeXtBottleneck(Bottleneck): """ResNeXt bottleneck type C with a Squeeze-and-Excitation module""" expansion = 4 def __init__( self, inplanes, planes, groups, reduction, stride=1, downsample=None, base_width=4 ): super(SEResNeXtBottleneck, self).__init__() width = int(math.floor(planes * (base_width/64.)) * groups) self.conv1 = nn.Conv2d( inplanes, width, kernel_size=1, bias=False, stride=1 ) self.bn1 = nn.BatchNorm2d(width) self.conv2 = nn.Conv2d( width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False ) self.bn2 = nn.BatchNorm2d(width) self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SENet(nn.Module): """Squeeze-and-excitation network. Reference: Hu et al. Squeeze-and-Excitation Networks. CVPR 2018. Public keys: - ``senet154``: SENet154. - ``se_resnet50``: ResNet50 + SE. - ``se_resnet101``: ResNet101 + SE. - ``se_resnet152``: ResNet152 + SE. - ``se_resnext50_32x4d``: ResNeXt50 (groups=32, width=4) + SE. - ``se_resnext101_32x4d``: ResNeXt101 (groups=32, width=4) + SE. - ``se_resnet50_fc512``: (ResNet50 + SE) + FC. 
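Example (a minimal sketch; the input size is an assumed example)::

    import torch
    model = se_resnet50(num_classes=1000, pretrained=False)
    model.eval()
    v = model(torch.rand(2, 3, 256, 128))
    assert v.shape == (2, 2048)   # pooled SE-ResNet50 feature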
""" def __init__( self, num_classes, loss, block, layers, groups, reduction, dropout_p=0.2, inplanes=128, input_3x3=True, downsample_kernel_size=3, downsample_padding=1, last_stride=2, fc_dims=None, **kwargs ): """ Parameters ---------- block (nn.Module): Bottleneck class. - For SENet154: SEBottleneck - For SE-ResNet models: SEResNetBottleneck - For SE-ResNeXt models: SEResNeXtBottleneck layers (list of ints): Number of residual blocks for 4 layers of the network (layer1...layer4). groups (int): Number of groups for the 3x3 convolution in each bottleneck block. - For SENet154: 64 - For SE-ResNet models: 1 - For SE-ResNeXt models: 32 reduction (int): Reduction ratio for Squeeze-and-Excitation modules. - For all models: 16 dropout_p (float or None): Drop probability for the Dropout layer. If `None` the Dropout layer is not used. - For SENet154: 0.2 - For SE-ResNet models: None - For SE-ResNeXt models: None inplanes (int): Number of input channels for layer1. - For SENet154: 128 - For SE-ResNet models: 64 - For SE-ResNeXt models: 64 input_3x3 (bool): If `True`, use three 3x3 convolutions instead of a single 7x7 convolution in layer0. - For SENet154: True - For SE-ResNet models: False - For SE-ResNeXt models: False downsample_kernel_size (int): Kernel size for downsampling convolutions in layer2, layer3 and layer4. - For SENet154: 3 - For SE-ResNet models: 1 - For SE-ResNeXt models: 1 downsample_padding (int): Padding for downsampling convolutions in layer2, layer3 and layer4. - For SENet154: 1 - For SE-ResNet models: 0 - For SE-ResNeXt models: 0 num_classes (int): Number of outputs in `classifier` layer. """ super(SENet, self).__init__() self.inplanes = inplanes self.loss = loss if input_3x3: layer0_modules = [ ( 'conv1', nn.Conv2d(3, 64, 3, stride=2, padding=1, bias=False) ), ('bn1', nn.BatchNorm2d(64)), ('relu1', nn.ReLU(inplace=True)), ( 'conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False) ), ('bn2', nn.BatchNorm2d(64)), ('relu2', nn.ReLU(inplace=True)), ( 'conv3', nn.Conv2d( 64, inplanes, 3, stride=1, padding=1, bias=False ) ), ('bn3', nn.BatchNorm2d(inplanes)), ('relu3', nn.ReLU(inplace=True)), ] else: layer0_modules = [ ( 'conv1', nn.Conv2d( 3, inplanes, kernel_size=7, stride=2, padding=3, bias=False ) ), ('bn1', nn.BatchNorm2d(inplanes)), ('relu1', nn.ReLU(inplace=True)), ] # To preserve compatibility with Caffe weights `ceil_mode=True` # is used instead of `padding=1`. 
layer0_modules.append( ('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True)) ) self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) self.layer1 = self._make_layer( block, planes=64, blocks=layers[0], groups=groups, reduction=reduction, downsample_kernel_size=1, downsample_padding=0 ) self.layer2 = self._make_layer( block, planes=128, blocks=layers[1], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.layer3 = self._make_layer( block, planes=256, blocks=layers[2], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.layer4 = self._make_layer( block, planes=512, blocks=layers[3], stride=last_stride, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.fc = self._construct_fc_layer( fc_dims, 512 * block.expansion, dropout_p ) self.classifier = nn.Linear(self.feature_dim, num_classes) def _make_layer( self, block, planes, blocks, groups, reduction, stride=1, downsample_kernel_size=1, downsample_padding=0 ): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d( self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size, stride=stride, padding=downsample_padding, bias=False ), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append( block( self.inplanes, planes, groups, reduction, stride, downsample ) ) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, groups, reduction)) return nn.Sequential(*layers) def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): """ Construct fully connected layer - fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed - input_dim (int): input dimension - dropout_p (float): dropout probability, if None, dropout is unused """ if fc_dims is None: self.feature_dim = input_dim return None assert isinstance( fc_dims, (list, tuple) ), 'fc_dims must be either list or tuple, but got {}'.format( type(fc_dims) ) layers = [] for dim in fc_dims: layers.append(nn.Linear(input_dim, dim)) layers.append(nn.BatchNorm1d(dim)) layers.append(nn.ReLU(inplace=True)) if dropout_p is not None: layers.append(nn.Dropout(p=dropout_p)) input_dim = dim self.feature_dim = fc_dims[-1] return nn.Sequential(*layers) def featuremaps(self, x): x = self.layer0(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x def forward(self, x): f = self.featuremaps(x) v = self.global_avgpool(f) v = v.view(v.size(0), -1) if self.fc is not None: v = self.fc(v) if not self.training: return v y = self.classifier(v) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, v else: raise KeyError("Unsupported loss: {}".format(self.loss)) def init_pretrained_weights(model, model_url): """Initializes model with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. 
""" pretrain_dict = model_zoo.load_url(model_url) model_dict = model.state_dict() pretrain_dict = { k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size() } model_dict.update(pretrain_dict) model.load_state_dict(model_dict) def senet154(num_classes, loss='softmax', pretrained=True, **kwargs): model = SENet( num_classes=num_classes, loss=loss, block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16, dropout_p=0.2, last_stride=2, fc_dims=None, **kwargs ) if pretrained: model_url = pretrained_settings['senet154']['imagenet']['url'] init_pretrained_weights(model, model_url) return model def se_resnet50(num_classes, loss='softmax', pretrained=True, **kwargs): model = SENet( num_classes=num_classes, loss=loss, block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, dropout_p=None, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, last_stride=2, fc_dims=None, **kwargs ) if pretrained: model_url = pretrained_settings['se_resnet50']['imagenet']['url'] init_pretrained_weights(model, model_url) return model def se_resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs): model = SENet( num_classes=num_classes, loss=loss, block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, dropout_p=None, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, last_stride=1, fc_dims=[512], **kwargs ) if pretrained: model_url = pretrained_settings['se_resnet50']['imagenet']['url'] init_pretrained_weights(model, model_url) return model def se_resnet101(num_classes, loss='softmax', pretrained=True, **kwargs): model = SENet( num_classes=num_classes, loss=loss, block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16, dropout_p=None, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, last_stride=2, fc_dims=None, **kwargs ) if pretrained: model_url = pretrained_settings['se_resnet101']['imagenet']['url'] init_pretrained_weights(model, model_url) return model def se_resnet152(num_classes, loss='softmax', pretrained=True, **kwargs): model = SENet( num_classes=num_classes, loss=loss, block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, dropout_p=None, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, last_stride=2, fc_dims=None, **kwargs ) if pretrained: model_url = pretrained_settings['se_resnet152']['imagenet']['url'] init_pretrained_weights(model, model_url) return model def se_resnext50_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs): model = SENet( num_classes=num_classes, loss=loss, block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16, dropout_p=None, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, last_stride=2, fc_dims=None, **kwargs ) if pretrained: model_url = pretrained_settings['se_resnext50_32x4d']['imagenet']['url' ] init_pretrained_weights(model, model_url) return model def se_resnext101_32x4d( num_classes, loss='softmax', pretrained=True, **kwargs ): model = SENet( num_classes=num_classes, loss=loss, block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, dropout_p=None, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, last_stride=2, fc_dims=None, **kwargs ) if pretrained: model_url = pretrained_settings['se_resnext101_32x4d']['imagenet'][ 'url'] init_pretrained_weights(model, model_url) return model ================================================ FILE: 
DLTA_AI_app/trackers/strongsort/deep/models/shufflenet.py ================================================ from __future__ import division, absolute_import import torch import torch.utils.model_zoo as model_zoo from torch import nn from torch.nn import functional as F __all__ = ['shufflenet'] model_urls = { # training epoch = 90, top1 = 61.8 'imagenet': 'https://mega.nz/#!RDpUlQCY!tr_5xBEkelzDjveIYBBcGcovNCOrgfiJO9kiidz9fZM', } class ChannelShuffle(nn.Module): def __init__(self, num_groups): super(ChannelShuffle, self).__init__() self.g = num_groups def forward(self, x): b, c, h, w = x.size() n = c // self.g # reshape x = x.view(b, self.g, n, h, w) # transpose x = x.permute(0, 2, 1, 3, 4).contiguous() # flatten x = x.view(b, c, h, w) return x class Bottleneck(nn.Module): def __init__( self, in_channels, out_channels, stride, num_groups, group_conv1x1=True ): super(Bottleneck, self).__init__() assert stride in [1, 2], 'Warning: stride must be either 1 or 2' self.stride = stride mid_channels = out_channels // 4 if stride == 2: out_channels -= in_channels # group conv is not applied to first conv1x1 at stage 2 num_groups_conv1x1 = num_groups if group_conv1x1 else 1 self.conv1 = nn.Conv2d( in_channels, mid_channels, 1, groups=num_groups_conv1x1, bias=False ) self.bn1 = nn.BatchNorm2d(mid_channels) self.shuffle1 = ChannelShuffle(num_groups) self.conv2 = nn.Conv2d( mid_channels, mid_channels, 3, stride=stride, padding=1, groups=mid_channels, bias=False ) self.bn2 = nn.BatchNorm2d(mid_channels) self.conv3 = nn.Conv2d( mid_channels, out_channels, 1, groups=num_groups, bias=False ) self.bn3 = nn.BatchNorm2d(out_channels) if stride == 2: self.shortcut = nn.AvgPool2d(3, stride=2, padding=1) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.shuffle1(out) out = self.bn2(self.conv2(out)) out = self.bn3(self.conv3(out)) if self.stride == 2: res = self.shortcut(x) out = F.relu(torch.cat([res, out], 1)) else: out = F.relu(x + out) return out # configuration of (num_groups: #out_channels) based on Table 1 in the paper cfg = { 1: [144, 288, 576], 2: [200, 400, 800], 3: [240, 480, 960], 4: [272, 544, 1088], 8: [384, 768, 1536], } class ShuffleNet(nn.Module): """ShuffleNet. Reference: Zhang et al. ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices. CVPR 2018. Public keys: - ``shufflenet``: ShuffleNet (groups=3). 
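The ``ChannelShuffle`` step above lets information cross group
boundaries: it reshapes (b, g*n, h, w) to (b, g, n, h, w), swaps the
group and per-group channel axes, and flattens back. A tiny sketch
(illustrative)::

    import torch
    cs = ChannelShuffle(num_groups=3)
    x = torch.arange(6.).view(1, 6, 1, 1)   # channels 0..5
    assert cs(x).flatten().tolist() == [0., 2., 4., 1., 3., 5.]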
""" def __init__(self, num_classes, loss='softmax', num_groups=3, **kwargs): super(ShuffleNet, self).__init__() self.loss = loss self.conv1 = nn.Sequential( nn.Conv2d(3, 24, 3, stride=2, padding=1, bias=False), nn.BatchNorm2d(24), nn.ReLU(), nn.MaxPool2d(3, stride=2, padding=1), ) self.stage2 = nn.Sequential( Bottleneck( 24, cfg[num_groups][0], 2, num_groups, group_conv1x1=False ), Bottleneck(cfg[num_groups][0], cfg[num_groups][0], 1, num_groups), Bottleneck(cfg[num_groups][0], cfg[num_groups][0], 1, num_groups), Bottleneck(cfg[num_groups][0], cfg[num_groups][0], 1, num_groups), ) self.stage3 = nn.Sequential( Bottleneck(cfg[num_groups][0], cfg[num_groups][1], 2, num_groups), Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), ) self.stage4 = nn.Sequential( Bottleneck(cfg[num_groups][1], cfg[num_groups][2], 2, num_groups), Bottleneck(cfg[num_groups][2], cfg[num_groups][2], 1, num_groups), Bottleneck(cfg[num_groups][2], cfg[num_groups][2], 1, num_groups), Bottleneck(cfg[num_groups][2], cfg[num_groups][2], 1, num_groups), ) self.classifier = nn.Linear(cfg[num_groups][2], num_classes) self.feat_dim = cfg[num_groups][2] def forward(self, x): x = self.conv1(x) x = self.stage2(x) x = self.stage3(x) x = self.stage4(x) x = F.avg_pool2d(x, x.size()[2:]).view(x.size(0), -1) if not self.training: return x y = self.classifier(x) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, x else: raise KeyError('Unsupported loss: {}'.format(self.loss)) def init_pretrained_weights(model, model_url): """Initializes model with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. 
""" pretrain_dict = model_zoo.load_url(model_url) model_dict = model.state_dict() pretrain_dict = { k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size() } model_dict.update(pretrain_dict) model.load_state_dict(model_dict) def shufflenet(num_classes, loss='softmax', pretrained=True, **kwargs): model = ShuffleNet(num_classes, loss, **kwargs) if pretrained: # init_pretrained_weights(model, model_urls['imagenet']) import warnings warnings.warn( 'The imagenet pretrained weights need to be manually downloaded from {}' .format(model_urls['imagenet']) ) return model ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/shufflenetv2.py ================================================ """ Code source: https://github.com/pytorch/vision """ from __future__ import division, absolute_import import torch import torch.utils.model_zoo as model_zoo from torch import nn __all__ = [ 'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0' ] model_urls = { 'shufflenetv2_x0.5': 'https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth', 'shufflenetv2_x1.0': 'https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth', 'shufflenetv2_x1.5': None, 'shufflenetv2_x2.0': None, } def channel_shuffle(x, groups): batchsize, num_channels, height, width = x.data.size() channels_per_group = num_channels // groups # reshape x = x.view(batchsize, groups, channels_per_group, height, width) x = torch.transpose(x, 1, 2).contiguous() # flatten x = x.view(batchsize, -1, height, width) return x class InvertedResidual(nn.Module): def __init__(self, inp, oup, stride): super(InvertedResidual, self).__init__() if not (1 <= stride <= 3): raise ValueError('illegal stride value') self.stride = stride branch_features = oup // 2 assert (self.stride != 1) or (inp == branch_features << 1) if self.stride > 1: self.branch1 = nn.Sequential( self.depthwise_conv( inp, inp, kernel_size=3, stride=self.stride, padding=1 ), nn.BatchNorm2d(inp), nn.Conv2d( inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False ), nn.BatchNorm2d(branch_features), nn.ReLU(inplace=True), ) self.branch2 = nn.Sequential( nn.Conv2d( inp if (self.stride > 1) else branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False ), nn.BatchNorm2d(branch_features), nn.ReLU(inplace=True), self.depthwise_conv( branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1 ), nn.BatchNorm2d(branch_features), nn.Conv2d( branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False ), nn.BatchNorm2d(branch_features), nn.ReLU(inplace=True), ) @staticmethod def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False): return nn.Conv2d( i, o, kernel_size, stride, padding, bias=bias, groups=i ) def forward(self, x): if self.stride == 1: x1, x2 = x.chunk(2, dim=1) out = torch.cat((x1, self.branch2(x2)), dim=1) else: out = torch.cat((self.branch1(x), self.branch2(x)), dim=1) out = channel_shuffle(out, 2) return out class ShuffleNetV2(nn.Module): """ShuffleNetV2. Reference: Ma et al. ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design. ECCV 2018. Public keys: - ``shufflenet_v2_x0_5``: ShuffleNetV2 x0.5. - ``shufflenet_v2_x1_0``: ShuffleNetV2 x1.0. - ``shufflenet_v2_x1_5``: ShuffleNetV2 x1.5. - ``shufflenet_v2_x2_0``: ShuffleNetV2 x2.0. 
""" def __init__( self, num_classes, loss, stages_repeats, stages_out_channels, **kwargs ): super(ShuffleNetV2, self).__init__() self.loss = loss if len(stages_repeats) != 3: raise ValueError( 'expected stages_repeats as list of 3 positive ints' ) if len(stages_out_channels) != 5: raise ValueError( 'expected stages_out_channels as list of 5 positive ints' ) self._stage_out_channels = stages_out_channels input_channels = 3 output_channels = self._stage_out_channels[0] self.conv1 = nn.Sequential( nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False), nn.BatchNorm2d(output_channels), nn.ReLU(inplace=True), ) input_channels = output_channels self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) stage_names = ['stage{}'.format(i) for i in [2, 3, 4]] for name, repeats, output_channels in zip( stage_names, stages_repeats, self._stage_out_channels[1:] ): seq = [InvertedResidual(input_channels, output_channels, 2)] for i in range(repeats - 1): seq.append( InvertedResidual(output_channels, output_channels, 1) ) setattr(self, name, nn.Sequential(*seq)) input_channels = output_channels output_channels = self._stage_out_channels[-1] self.conv5 = nn.Sequential( nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False), nn.BatchNorm2d(output_channels), nn.ReLU(inplace=True), ) self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.classifier = nn.Linear(output_channels, num_classes) def featuremaps(self, x): x = self.conv1(x) x = self.maxpool(x) x = self.stage2(x) x = self.stage3(x) x = self.stage4(x) x = self.conv5(x) return x def forward(self, x): f = self.featuremaps(x) v = self.global_avgpool(f) v = v.view(v.size(0), -1) if not self.training: return v y = self.classifier(v) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, v else: raise KeyError("Unsupported loss: {}".format(self.loss)) def init_pretrained_weights(model, model_url): """Initializes model with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. 
""" if model_url is None: import warnings warnings.warn( 'ImageNet pretrained weights are unavailable for this model' ) return pretrain_dict = model_zoo.load_url(model_url) model_dict = model.state_dict() pretrain_dict = { k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size() } model_dict.update(pretrain_dict) model.load_state_dict(model_dict) def shufflenet_v2_x0_5(num_classes, loss='softmax', pretrained=True, **kwargs): model = ShuffleNetV2( num_classes, loss, [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['shufflenetv2_x0.5']) return model def shufflenet_v2_x1_0(num_classes, loss='softmax', pretrained=True, **kwargs): model = ShuffleNetV2( num_classes, loss, [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['shufflenetv2_x1.0']) return model def shufflenet_v2_x1_5(num_classes, loss='softmax', pretrained=True, **kwargs): model = ShuffleNetV2( num_classes, loss, [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['shufflenetv2_x1.5']) return model def shufflenet_v2_x2_0(num_classes, loss='softmax', pretrained=True, **kwargs): model = ShuffleNetV2( num_classes, loss, [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['shufflenetv2_x2.0']) return model ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/squeezenet.py ================================================ """ Code source: https://github.com/pytorch/vision """ from __future__ import division, absolute_import import torch import torch.nn as nn import torch.utils.model_zoo as model_zoo __all__ = ['squeezenet1_0', 'squeezenet1_1', 'squeezenet1_0_fc512'] model_urls = { 'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth', 'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth', } class Fire(nn.Module): def __init__( self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes ): super(Fire, self).__init__() self.inplanes = inplanes self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) self.squeeze_activation = nn.ReLU(inplace=True) self.expand1x1 = nn.Conv2d( squeeze_planes, expand1x1_planes, kernel_size=1 ) self.expand1x1_activation = nn.ReLU(inplace=True) self.expand3x3 = nn.Conv2d( squeeze_planes, expand3x3_planes, kernel_size=3, padding=1 ) self.expand3x3_activation = nn.ReLU(inplace=True) def forward(self, x): x = self.squeeze_activation(self.squeeze(x)) return torch.cat( [ self.expand1x1_activation(self.expand1x1(x)), self.expand3x3_activation(self.expand3x3(x)) ], 1 ) class SqueezeNet(nn.Module): """SqueezeNet. Reference: Iandola et al. SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and< 0.5 MB model size. arXiv:1602.07360. Public keys: - ``squeezenet1_0``: SqueezeNet (version=1.0). - ``squeezenet1_1``: SqueezeNet (version=1.1). - ``squeezenet1_0_fc512``: SqueezeNet (version=1.0) + FC. 
""" def __init__( self, num_classes, loss, version=1.0, fc_dims=None, dropout_p=None, **kwargs ): super(SqueezeNet, self).__init__() self.loss = loss self.feature_dim = 512 if version not in [1.0, 1.1]: raise ValueError( 'Unsupported SqueezeNet version {version}:' '1.0 or 1.1 expected'.format(version=version) ) if version == 1.0: self.features = nn.Sequential( nn.Conv2d(3, 96, kernel_size=7, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(96, 16, 64, 64), Fire(128, 16, 64, 64), Fire(128, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(256, 32, 128, 128), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 256), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(512, 64, 256, 256), ) else: self.features = nn.Sequential( nn.Conv2d(3, 64, kernel_size=3, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(64, 16, 64, 64), Fire(128, 16, 64, 64), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(128, 32, 128, 128), Fire(256, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 256), Fire(512, 64, 256, 256), ) self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.fc = self._construct_fc_layer(fc_dims, 512, dropout_p) self.classifier = nn.Linear(self.feature_dim, num_classes) self._init_params() def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): """Constructs fully connected layer Args: fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed input_dim (int): input dimension dropout_p (float): dropout probability, if None, dropout is unused """ if fc_dims is None: self.feature_dim = input_dim return None assert isinstance( fc_dims, (list, tuple) ), 'fc_dims must be either list or tuple, but got {}'.format( type(fc_dims) ) layers = [] for dim in fc_dims: layers.append(nn.Linear(input_dim, dim)) layers.append(nn.BatchNorm1d(dim)) layers.append(nn.ReLU(inplace=True)) if dropout_p is not None: layers.append(nn.Dropout(p=dropout_p)) input_dim = dim self.feature_dim = fc_dims[-1] return nn.Sequential(*layers) def _init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu' ) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self, x): f = self.features(x) v = self.global_avgpool(f) v = v.view(v.size(0), -1) if self.fc is not None: v = self.fc(v) if not self.training: return v y = self.classifier(v) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, v else: raise KeyError('Unsupported loss: {}'.format(self.loss)) def init_pretrained_weights(model, model_url): """Initializes model with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. 
""" pretrain_dict = model_zoo.load_url(model_url, map_location=None) model_dict = model.state_dict() pretrain_dict = { k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size() } model_dict.update(pretrain_dict) model.load_state_dict(model_dict) def squeezenet1_0(num_classes, loss='softmax', pretrained=True, **kwargs): model = SqueezeNet( num_classes, loss, version=1.0, fc_dims=None, dropout_p=None, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['squeezenet1_0']) return model def squeezenet1_0_fc512( num_classes, loss='softmax', pretrained=True, **kwargs ): model = SqueezeNet( num_classes, loss, version=1.0, fc_dims=[512], dropout_p=None, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['squeezenet1_0']) return model def squeezenet1_1(num_classes, loss='softmax', pretrained=True, **kwargs): model = SqueezeNet( num_classes, loss, version=1.1, fc_dims=None, dropout_p=None, **kwargs ) if pretrained: init_pretrained_weights(model, model_urls['squeezenet1_1']) return model ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/models/xception.py ================================================ from __future__ import division, absolute_import import torch.nn as nn import torch.nn.functional as F import torch.utils.model_zoo as model_zoo __all__ = ['xception'] pretrained_settings = { 'xception': { 'imagenet': { 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/xception-43020ad28.pth', 'input_space': 'RGB', 'input_size': [3, 299, 299], 'input_range': [0, 1], 'mean': [0.5, 0.5, 0.5], 'std': [0.5, 0.5, 0.5], 'num_classes': 1000, 'scale': 0.8975 # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 } } } class SeparableConv2d(nn.Module): def __init__( self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False ): super(SeparableConv2d, self).__init__() self.conv1 = nn.Conv2d( in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=bias ) self.pointwise = nn.Conv2d( in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias ) def forward(self, x): x = self.conv1(x) x = self.pointwise(x) return x class Block(nn.Module): def __init__( self, in_filters, out_filters, reps, strides=1, start_with_relu=True, grow_first=True ): super(Block, self).__init__() if out_filters != in_filters or strides != 1: self.skip = nn.Conv2d( in_filters, out_filters, 1, stride=strides, bias=False ) self.skipbn = nn.BatchNorm2d(out_filters) else: self.skip = None self.relu = nn.ReLU(inplace=True) rep = [] filters = in_filters if grow_first: rep.append(self.relu) rep.append( SeparableConv2d( in_filters, out_filters, 3, stride=1, padding=1, bias=False ) ) rep.append(nn.BatchNorm2d(out_filters)) filters = out_filters for i in range(reps - 1): rep.append(self.relu) rep.append( SeparableConv2d( filters, filters, 3, stride=1, padding=1, bias=False ) ) rep.append(nn.BatchNorm2d(filters)) if not grow_first: rep.append(self.relu) rep.append( SeparableConv2d( in_filters, out_filters, 3, stride=1, padding=1, bias=False ) ) rep.append(nn.BatchNorm2d(out_filters)) if not start_with_relu: rep = rep[1:] else: rep[0] = nn.ReLU(inplace=False) if strides != 1: rep.append(nn.MaxPool2d(3, strides, 1)) self.rep = nn.Sequential(*rep) def forward(self, inp): x = self.rep(inp) if self.skip is not None: skip = self.skip(inp) skip = self.skipbn(skip) else: skip = inp x += skip return x class Xception(nn.Module): """Xception. 
Reference: Chollet. Xception: Deep Learning with Depthwise Separable Convolutions. CVPR 2017. Public keys: - ``xception``: Xception. """ def __init__( self, num_classes, loss, fc_dims=None, dropout_p=None, **kwargs ): super(Xception, self).__init__() self.loss = loss self.conv1 = nn.Conv2d(3, 32, 3, 2, 0, bias=False) self.bn1 = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(32, 64, 3, bias=False) self.bn2 = nn.BatchNorm2d(64) self.block1 = Block( 64, 128, 2, 2, start_with_relu=False, grow_first=True ) self.block2 = Block( 128, 256, 2, 2, start_with_relu=True, grow_first=True ) self.block3 = Block( 256, 728, 2, 2, start_with_relu=True, grow_first=True ) self.block4 = Block( 728, 728, 3, 1, start_with_relu=True, grow_first=True ) self.block5 = Block( 728, 728, 3, 1, start_with_relu=True, grow_first=True ) self.block6 = Block( 728, 728, 3, 1, start_with_relu=True, grow_first=True ) self.block7 = Block( 728, 728, 3, 1, start_with_relu=True, grow_first=True ) self.block8 = Block( 728, 728, 3, 1, start_with_relu=True, grow_first=True ) self.block9 = Block( 728, 728, 3, 1, start_with_relu=True, grow_first=True ) self.block10 = Block( 728, 728, 3, 1, start_with_relu=True, grow_first=True ) self.block11 = Block( 728, 728, 3, 1, start_with_relu=True, grow_first=True ) self.block12 = Block( 728, 1024, 2, 2, start_with_relu=True, grow_first=False ) self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1) self.bn3 = nn.BatchNorm2d(1536) self.conv4 = SeparableConv2d(1536, 2048, 3, 1, 1) self.bn4 = nn.BatchNorm2d(2048) self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.feature_dim = 2048 self.fc = self._construct_fc_layer(fc_dims, 2048, dropout_p) self.classifier = nn.Linear(self.feature_dim, num_classes) self._init_params() def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): """Constructs fully connected layer. 
Args: fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed input_dim (int): input dimension dropout_p (float): dropout probability, if None, dropout is unused """ if fc_dims is None: self.feature_dim = input_dim return None assert isinstance( fc_dims, (list, tuple) ), 'fc_dims must be either list or tuple, but got {}'.format( type(fc_dims) ) layers = [] for dim in fc_dims: layers.append(nn.Linear(input_dim, dim)) layers.append(nn.BatchNorm1d(dim)) layers.append(nn.ReLU(inplace=True)) if dropout_p is not None: layers.append(nn.Dropout(p=dropout_p)) input_dim = dim self.feature_dim = fc_dims[-1] return nn.Sequential(*layers) def _init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu' ) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) def featuremaps(self, input): x = self.conv1(input) x = self.bn1(x) x = F.relu(x, inplace=True) x = self.conv2(x) x = self.bn2(x) x = F.relu(x, inplace=True) x = self.block1(x) x = self.block2(x) x = self.block3(x) x = self.block4(x) x = self.block5(x) x = self.block6(x) x = self.block7(x) x = self.block8(x) x = self.block9(x) x = self.block10(x) x = self.block11(x) x = self.block12(x) x = self.conv3(x) x = self.bn3(x) x = F.relu(x, inplace=True) x = self.conv4(x) x = self.bn4(x) x = F.relu(x, inplace=True) return x def forward(self, x): f = self.featuremaps(x) v = self.global_avgpool(f) v = v.view(v.size(0), -1) if self.fc is not None: v = self.fc(v) if not self.training: return v y = self.classifier(v) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, v else: raise KeyError('Unsupported loss: {}'.format(self.loss)) def init_pretrained_weights(model, model_url): """Initialize models with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. 
""" pretrain_dict = model_zoo.load_url(model_url) model_dict = model.state_dict() pretrain_dict = { k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size() } model_dict.update(pretrain_dict) model.load_state_dict(model_dict) def xception(num_classes, loss='softmax', pretrained=True, **kwargs): model = Xception(num_classes, loss, fc_dims=None, dropout_p=None, **kwargs) if pretrained: model_url = pretrained_settings['xception']['imagenet']['url'] init_pretrained_weights(model, model_url) return model ================================================ FILE: DLTA_AI_app/trackers/strongsort/deep/reid_model_factory.py ================================================ import torch from collections import OrderedDict __model_types = [ 'resnet50', 'mlfn', 'hacnn', 'mobilenetv2_x1_0', 'mobilenetv2_x1_4', 'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', 'osnet_ibn_x1_0', 'osnet_ain_x1_0'] __trained_urls = { # market1501 models ######################################################## 'resnet50_market1501.pt': 'https://drive.google.com/uc?id=1dUUZ4rHDWohmsQXCRe2C_HbYkzz94iBV', 'resnet50_dukemtmcreid.pt': 'https://drive.google.com/uc?id=17ymnLglnc64NRvGOitY3BqMRS9UWd1wg', 'resnet50_msmt17.pt': 'https://drive.google.com/uc?id=1ep7RypVDOthCRIAqDnn4_N-UhkkFHJsj', 'resnet50_fc512_market1501.pt': 'https://drive.google.com/uc?id=1kv8l5laX_YCdIGVCetjlNdzKIA3NvsSt', 'resnet50_fc512_dukemtmcreid.pt': 'https://drive.google.com/uc?id=13QN8Mp3XH81GK4BPGXobKHKyTGH50Rtx', 'resnet50_fc512_msmt17.pt': 'https://drive.google.com/uc?id=1fDJLcz4O5wxNSUvImIIjoaIF9u1Rwaud', 'mlfn_market1501.pt': 'https://drive.google.com/uc?id=1wXcvhA_b1kpDfrt9s2Pma-MHxtj9pmvS', 'mlfn_dukemtmcreid.pt': 'https://drive.google.com/uc?id=1rExgrTNb0VCIcOnXfMsbwSUW1h2L1Bum', 'mlfn_msmt17.pt': 'https://drive.google.com/uc?id=18JzsZlJb3Wm7irCbZbZ07TN4IFKvR6p-', 'hacnn_market1501.pt': 'https://drive.google.com/uc?id=1LRKIQduThwGxMDQMiVkTScBwR7WidmYF', 'hacnn_dukemtmcreid.pt': 'https://drive.google.com/uc?id=1zNm6tP4ozFUCUQ7Sv1Z98EAJWXJEhtYH', 'hacnn_msmt17.pt': 'https://drive.google.com/uc?id=1MsKRtPM5WJ3_Tk2xC0aGOO7pM3VaFDNZ', 'mobilenetv2_x1_0_market1501.pt': 'https://drive.google.com/uc?id=18DgHC2ZJkjekVoqBWszD8_Xiikz-fewp', 'mobilenetv2_x1_0_dukemtmcreid.pt': 'https://drive.google.com/uc?id=1q1WU2FETRJ3BXcpVtfJUuqq4z3psetds', 'mobilenetv2_x1_0_msmt17.pt': 'https://drive.google.com/uc?id=1j50Hv14NOUAg7ZeB3frzfX-WYLi7SrhZ', 'mobilenetv2_x1_4_market1501.pt': 'https://drive.google.com/uc?id=1t6JCqphJG-fwwPVkRLmGGyEBhGOf2GO5', 'mobilenetv2_x1_4_dukemtmcreid.pt': 'https://drive.google.com/uc?id=12uD5FeVqLg9-AFDju2L7SQxjmPb4zpBN', 'mobilenetv2_x1_4_msmt17.pt': 'https://drive.google.com/uc?id=1ZY5P2Zgm-3RbDpbXM0kIBMPvspeNIbXz', 'osnet_x1_0_market1501.pt': 'https://drive.google.com/uc?id=1vduhq5DpN2q1g4fYEZfPI17MJeh9qyrA', 'osnet_x1_0_dukemtmcreid.pt': 'https://drive.google.com/uc?id=1QZO_4sNf4hdOKKKzKc-TZU9WW1v6zQbq', 'osnet_x1_0_msmt17.pt': 'https://drive.google.com/uc?id=112EMUfBPYeYg70w-syK6V6Mx8-Qb9Q1M', 'osnet_x0_75_market1501.pt': 'https://drive.google.com/uc?id=1ozRaDSQw_EQ8_93OUmjDbvLXw9TnfPer', 'osnet_x0_75_dukemtmcreid.pt': 'https://drive.google.com/uc?id=1IE3KRaTPp4OUa6PGTFL_d5_KQSJbP0Or', 'osnet_x0_75_msmt17.pt': 'https://drive.google.com/uc?id=1QEGO6WnJ-BmUzVPd3q9NoaO_GsPNlmWc', 'osnet_x0_5_market1501.pt': 'https://drive.google.com/uc?id=1PLB9rgqrUM7blWrg4QlprCuPT7ILYGKT', 'osnet_x0_5_dukemtmcreid.pt': 'https://drive.google.com/uc?id=1KoUVqmiST175hnkALg9XuTi1oYpqcyTu', 'osnet_x0_5_msmt17.pt': 
'https://drive.google.com/uc?id=1UT3AxIaDvS2PdxzZmbkLmjtiqq7AIKCv', 'osnet_x0_25_market1501.pt': 'https://drive.google.com/uc?id=1z1UghYvOTtjx7kEoRfmqSMu-z62J6MAj', 'osnet_x0_25_dukemtmcreid.pt': 'https://drive.google.com/uc?id=1eumrtiXT4NOspjyEV4j8cHmlOaaCGk5l', 'osnet_x0_25_msmt17.pt': 'https://drive.google.com/uc?id=1sSwXSUlj4_tHZequ_iZ8w_Jh0VaRQMqF', ####### msmt17 models (note: these keys duplicate entries above; the values below win) ####### 'resnet50_msmt17.pt': 'https://drive.google.com/uc?id=1yiBteqgIZoOeywE8AhGmEQl7FTVwrQmf', 'osnet_x1_0_msmt17.pt': 'https://drive.google.com/uc?id=1IosIFlLiulGIjwW3H8uMRmx3MzPwf86x', 'osnet_x0_75_msmt17.pt': 'https://drive.google.com/uc?id=1fhjSS_7SUGCioIf2SWXaRGPqIY9j7-uw', 'osnet_x0_5_msmt17.pt': 'https://drive.google.com/uc?id=1DHgmb6XV4fwG3n-CnCM0zdL9nMsZ9_RF', 'osnet_x0_25_msmt17.pt': 'https://drive.google.com/uc?id=1Kkx2zW89jq_NETu4u42CFZTMVD5Hwm6e', 'osnet_ibn_x1_0_msmt17.pt': 'https://drive.google.com/uc?id=1q3Sj2ii34NlfxA4LvmHdWO_75NDRmECJ', 'osnet_ain_x1_0_msmt17.pt': 'https://drive.google.com/uc?id=1SigwBE6mPdqiJMqhuIY4aqC7--5CsMal', } def show_downloadeable_models(): print('\nAvailable .pt ReID models for automatic download') print(list(__trained_urls.keys())) def get_model_url(model): if model.name in __trained_urls: return __trained_urls[model.name] else: return None def is_model_in_model_types(model): if model.name in __model_types: return True else: return False def get_model_name(model): for x in __model_types: if x in model.name: return x return None def download_url(url, dst): """Downloads file from a url to a destination. Args: url (str): url to download file. dst (str): destination path. """ from six.moves import urllib import sys import time print('* url="{}"'.format(url)) print('* destination="{}"'.format(dst)) def _reporthook(count, block_size, total_size): global start_time if count == 0: start_time = time.time() return duration = time.time() - start_time progress_size = int(count * block_size) speed = int(progress_size / (1024*duration)) percent = int(count * block_size * 100 / total_size) sys.stdout.write( '\r...%d%%, %d MB, %d KB/s, %d seconds passed' % (percent, progress_size / (1024*1024), speed, duration) ) sys.stdout.flush() urllib.request.urlretrieve(url, dst, _reporthook) sys.stdout.write('\n') def load_pretrained_weights(model, weight_path): r"""Loads pretrained weights to model. Features:: - Incompatible layers (unmatched in name or size) will be ignored. - Can automatically deal with keys containing "module.". Args: model (nn.Module): network model. weight_path (str): path to pretrained weights. Examples:: >>> from torchreid.utils import load_pretrained_weights >>> weight_path = 'log/my_model/model-best.pth.tar' >>> load_pretrained_weights(model, weight_path) """ import warnings checkpoint = torch.load(weight_path) if 'state_dict' in checkpoint: state_dict = checkpoint['state_dict'] else: state_dict = checkpoint model_dict = model.state_dict() new_state_dict = OrderedDict() matched_layers, discarded_layers = [], [] for k, v in state_dict.items(): if k.startswith('module.'): k = k[7:] # discard module. if k in model_dict and model_dict[k].size() == v.size(): new_state_dict[k] = v matched_layers.append(k) else: discarded_layers.append(k) model_dict.update(new_state_dict) model.load_state_dict(model_dict) if len(matched_layers) == 0: warnings.warn( 'The pretrained weights "{}" cannot be loaded, ' 'please check the key names manually ' '(** ignored and continue **)'.format(weight_path) ) else: print( 'Successfully loaded pretrained weights from "{}"'.
format(weight_path) ) if len(discarded_layers) > 0: print( '** The following layers are discarded ' 'due to unmatched keys or layer size: {}'. format(discarded_layers) ) ================================================ FILE: DLTA_AI_app/trackers/strongsort/reid_multibackend.py ================================================ import torch.nn as nn import torch from pathlib import Path import numpy as np from itertools import islice import torchvision.transforms as transforms import cv2 import sys import torchvision.transforms as T from collections import OrderedDict, namedtuple import gdown from os.path import exists as file_exists from ultralytics.yolo.utils.checks import check_requirements, check_version from ultralytics.yolo.utils import LOGGER from trackers.strongsort.deep.reid_model_factory import (show_downloadeable_models, get_model_url, get_model_name, download_url, load_pretrained_weights) from trackers.strongsort.deep.models import build_model def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): # Check file(s) for acceptable suffix if file and suffix: if isinstance(suffix, str): suffix = [suffix] for f in file if isinstance(file, (list, tuple)) else [file]: s = Path(f).suffix.lower() # file suffix if len(s): assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" class ReIDDetectMultiBackend(nn.Module): # ReID models MultiBackend class for python inference on various backends def __init__(self, weights='osnet_x0_25_msmt17.pt', device=torch.device('cpu'), fp16=False): super().__init__() w = weights[0] if isinstance(weights, list) else weights self.pt, self.jit, self.onnx, self.xml, self.engine, self.tflite = self.model_type(w) # get backend self.fp16 = fp16 self.fp16 &= self.pt or self.jit or self.engine # FP16 # Build transform functions self.device = device self.image_size=(256, 128) self.pixel_mean=[0.485, 0.456, 0.406] self.pixel_std=[0.229, 0.224, 0.225] self.transforms = [] self.transforms += [T.Resize(self.image_size)] self.transforms += [T.ToTensor()] self.transforms += [T.Normalize(mean=self.pixel_mean, std=self.pixel_std)] self.preprocess = T.Compose(self.transforms) self.to_pil = T.ToPILImage() model_name = get_model_name(w) if w.suffix == '.pt': model_url = get_model_url(w) if not file_exists(w) and model_url is not None: gdown.download(model_url, str(w), quiet=False) elif file_exists(w): pass else: print(f'No URL associated to the chosen StrongSORT weights ({w}). 
Choose between:') show_downloadeable_models() exit() # Build model self.model = build_model( model_name, num_classes=1, pretrained=not (w and w.is_file()), use_gpu=device ) if self.pt: # PyTorch # populate model arch with weights if w and w.is_file() and w.suffix == '.pt': load_pretrained_weights(self.model, w) self.model.to(device).eval() self.model.half() if self.fp16 else self.model.float() elif self.jit: LOGGER.info(f'Loading {w} for TorchScript inference...') self.model = torch.jit.load(w) self.model.half() if self.fp16 else self.model.float() elif self.onnx: # ONNX Runtime LOGGER.info(f'Loading {w} for ONNX Runtime inference...') cuda = torch.cuda.is_available() and device.type != 'cpu' #check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) import onnxruntime providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] self.session = onnxruntime.InferenceSession(str(w), providers=providers) elif self.engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 if device.type == 'cpu': device = torch.device('cuda:0') Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: self.model_ = runtime.deserialize_cuda_engine(f.read()) self.context = self.model_.create_execution_context() self.bindings = OrderedDict() self.fp16 = False # default updated below dynamic = False for index in range(self.model_.num_bindings): name = self.model_.get_binding_name(index) dtype = trt.nptype(self.model_.get_binding_dtype(index)) if self.model_.binding_is_input(index): if -1 in tuple(self.model_.get_binding_shape(index)): # dynamic dynamic = True self.context.set_binding_shape(index, tuple(self.model_.get_profile_shape(0, index)[2])) if dtype == np.float16: self.fp16 = True shape = tuple(self.context.get_binding_shape(index)) im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) self.bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) self.binding_addrs = OrderedDict((n, d.ptr) for n, d in self.bindings.items()) batch_size = self.bindings['images'].shape[0] # if dynamic, this is instead max batch size elif self.xml: # OpenVINO LOGGER.info(f'Loading {w} for OpenVINO inference...') check_requirements(('openvino',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ from openvino.runtime import Core, Layout, get_batch ie = Core() if not Path(w).is_file(): # if not *.xml w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) if network.get_parameters()[0].get_layout().empty: network.get_parameters()[0].set_layout(Layout("NCWH")) batch_dim = get_batch(network) if batch_dim.is_static: batch_size = batch_dim.get_length() self.executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 self.output_layer = next(iter(self.executable_network.outputs)) elif self.tflite: LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu from tflite_runtime.interpreter import Interpreter, load_delegate except ImportError: import tensorflow as tf Interpreter, load_delegate = tf.lite.Interpreter, 
tf.lite.experimental.load_delegate, self.interpreter = Interpreter(model_path=str(w)) self.interpreter.allocate_tensors() # Get input and output tensors. self.input_details = self.interpreter.get_input_details() self.output_details = self.interpreter.get_output_details() # Test model on random input data. input_data = np.array(np.random.random_sample((1,256,128,3)), dtype=np.float32) self.interpreter.set_tensor(self.input_details[0]['index'], input_data) self.interpreter.invoke() # The function `get_tensor()` returns a copy of the tensor data. output_data = self.interpreter.get_tensor(self.output_details[0]['index']) else: print('This model framework is not supported yet!') exit() @staticmethod def model_type(p='path/to/model.pt'): # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx from trackers.reid_export import export_formats sf = list(export_formats().Suffix) # export suffixes check_suffix(p, sf) # checks types = [s in Path(p).name for s in sf] return types def _preprocess(self, im_batch): images = [] for element in im_batch: image = self.to_pil(element) image = self.preprocess(image) images.append(image) images = torch.stack(images, dim=0) images = images.to(self.device) return images def forward(self, im_batch): # preprocess batch im_batch = self._preprocess(im_batch) # batch to half if self.fp16 and im_batch.dtype != torch.float16: im_batch = im_batch.half() # batch processing features = [] if self.pt: features = self.model(im_batch) elif self.jit: # TorchScript features = self.model(im_batch) elif self.onnx: # ONNX Runtime im_batch = im_batch.cpu().numpy() # torch to numpy features = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im_batch})[0] elif self.engine: # TensorRT if im_batch.shape != self.bindings['images'].shape: i_in, i_out = (self.model_.get_binding_index(x) for x in ('images', 'output')) self.context.set_binding_shape(i_in, im_batch.shape) # reshape if dynamic self.bindings['images'] = self.bindings['images']._replace(shape=im_batch.shape) self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out))) s = self.bindings['images'].shape assert im_batch.shape == s, f"input size {im_batch.shape} not equal to model size {s}" self.binding_addrs['images'] = int(im_batch.data_ptr()) self.context.execute_v2(list(self.binding_addrs.values())) features = self.bindings['output'].data elif self.xml: # OpenVINO im_batch = im_batch.cpu().numpy() # FP32 features = self.executable_network([im_batch])[self.output_layer] else: print('Framework not supported at the moment, we are working on it...') exit() if isinstance(features, (list, tuple)): return self.from_numpy(features[0]) if len(features) == 1 else [self.from_numpy(x) for x in features] else: return self.from_numpy(features) def from_numpy(self, x): return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x def warmup(self, imgsz=[(256, 128, 3)]): # Warmup model by running inference once warmup_types = self.pt, self.jit, self.onnx, self.engine, self.tflite if any(warmup_types) and self.device.type != 'cpu': im = [np.empty(*imgsz).astype(np.uint8)] # input for _ in range(2 if self.jit else 1): self.forward(im) # warmup ================================================ FILE: DLTA_AI_app/trackers/strongsort/sort/__init__.py ================================================ ================================================ FILE: DLTA_AI_app/trackers/strongsort/sort/detection.py
================================================ # vim: expandtab:ts=4:sw=4 import numpy as np class Detection(object): """ This class represents a bounding box detection in a single image. Parameters ---------- tlwh : array_like Bounding box in format `(x, y, w, h)`. confidence : float Detector confidence score. feature : array_like A feature vector that describes the object contained in this image. Attributes ---------- tlwh : ndarray Bounding box in format `(top left x, top left y, width, height)`. confidence : float Detector confidence score. feature : ndarray | NoneType A feature vector that describes the object contained in this image. """ def __init__(self, tlwh, confidence, feature): self.tlwh = np.asarray(tlwh, dtype=np.float32) self.confidence = float(confidence) self.feature = np.asarray(feature.cpu(), dtype=np.float32) def to_tlbr(self): """Convert bounding box to format `(min x, min y, max x, max y)`, i.e., `(top left, bottom right)`. """ ret = self.tlwh.copy() ret[2:] += ret[:2] return ret def to_xyah(self): """Convert bounding box to format `(center x, center y, aspect ratio, height)`, where the aspect ratio is `width / height`. """ ret = self.tlwh.copy() ret[:2] += ret[2:] / 2 ret[2] /= ret[3] return ret def to_xyah_ext(bbox): """Convert bounding box to format `(center x, center y, aspect ratio, height)`, where the aspect ratio is `width / height`. """ ret = bbox.copy() ret[:2] += ret[2:] / 2 ret[2] /= ret[3] return ret ================================================ FILE: DLTA_AI_app/trackers/strongsort/sort/iou_matching.py ================================================ # vim: expandtab:ts=4:sw=4 from __future__ import absolute_import import numpy as np from . import linear_assignment def iou(bbox, candidates): """Compute intersection over union. Parameters ---------- bbox : ndarray A bounding box in format `(top left x, top left y, width, height)`. candidates : ndarray A matrix of candidate bounding boxes (one per row) in the same format as `bbox`. Returns ------- ndarray The intersection over union in [0, 1] between the `bbox` and each candidate. A higher score means a larger fraction of the `bbox` is occluded by the candidate. """ bbox_tl, bbox_br = bbox[:2], bbox[:2] + bbox[2:] candidates_tl = candidates[:, :2] candidates_br = candidates[:, :2] + candidates[:, 2:] tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis], np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]] br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis], np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]] wh = np.maximum(0., br - tl) area_intersection = wh.prod(axis=1) area_bbox = bbox[2:].prod() area_candidates = candidates[:, 2:].prod(axis=1) return area_intersection / (area_bbox + area_candidates - area_intersection) def iou_cost(tracks, detections, track_indices=None, detection_indices=None): """An intersection over union distance metric. Parameters ---------- tracks : List[deep_sort.track.Track] A list of tracks. detections : List[deep_sort.detection.Detection] A list of detections. track_indices : Optional[List[int]] A list of indices to tracks that should be matched. Defaults to all `tracks`. detection_indices : Optional[List[int]] A list of indices to detections that should be matched. Defaults to all `detections`. Returns ------- ndarray Returns a cost matrix of shape len(track_indices), len(detection_indices) where entry (i, j) is `1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`.
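Example for the underlying `iou` helper (illustrative boxes in `(x, y, w, h)` format; the first candidate coincides with `bbox`, the second is disjoint):: >>> import numpy as np >>> iou(np.array([0., 0., 10., 10.]), np.array([[0., 0., 10., 10.], [20., 20., 10., 10.]])) array([1., 0.])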
""" if track_indices is None: track_indices = np.arange(len(tracks)) if detection_indices is None: detection_indices = np.arange(len(detections)) cost_matrix = np.zeros((len(track_indices), len(detection_indices))) for row, track_idx in enumerate(track_indices): if tracks[track_idx].time_since_update > 1: cost_matrix[row, :] = linear_assignment.INFTY_COST continue bbox = tracks[track_idx].to_tlwh() candidates = np.asarray( [detections[i].tlwh for i in detection_indices]) cost_matrix[row, :] = 1. - iou(bbox, candidates) return cost_matrix ================================================ FILE: DLTA_AI_app/trackers/strongsort/sort/kalman_filter.py ================================================ # vim: expandtab:ts=4:sw=4 import numpy as np import scipy.linalg """ Table for the 0.95 quantile of the chi-square distribution with N degrees of freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv function and used as Mahalanobis gating threshold. """ chi2inv95 = { 1: 3.8415, 2: 5.9915, 3: 7.8147, 4: 9.4877, 5: 11.070, 6: 12.592, 7: 14.067, 8: 15.507, 9: 16.919} class KalmanFilter(object): """ A simple Kalman filter for tracking bounding boxes in image space. The 8-dimensional state space x, y, a, h, vx, vy, va, vh contains the bounding box center position (x, y), aspect ratio a, height h, and their respective velocities. Object motion follows a constant velocity model. The bounding box location (x, y, a, h) is taken as direct observation of the state space (linear observation model). """ def __init__(self): ndim, dt = 4, 1. # Create Kalman filter model matrices. self._motion_mat = np.eye(2 * ndim, 2 * ndim) for i in range(ndim): self._motion_mat[i, ndim + i] = dt self._update_mat = np.eye(ndim, 2 * ndim) # Motion and observation uncertainty are chosen relative to the current # state estimate. These weights control the amount of uncertainty in # the model. This is a bit hacky. self._std_weight_position = 1. / 20 self._std_weight_velocity = 1. / 160 def initiate(self, measurement): """Create track from unassociated measurement. Parameters ---------- measurement : ndarray Bounding box coordinates (x, y, a, h) with center position (x, y), aspect ratio a, and height h. Returns ------- (ndarray, ndarray) Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional) of the new track. Unobserved velocities are initialized to 0 mean. """ mean_pos = measurement mean_vel = np.zeros_like(mean_pos) mean = np.r_[mean_pos, mean_vel] std = [ 2 * self._std_weight_position * measurement[0], # the center point x 2 * self._std_weight_position * measurement[1], # the center point y 1 * measurement[2], # the ratio of width/height 2 * self._std_weight_position * measurement[3], # the height 10 * self._std_weight_velocity * measurement[0], 10 * self._std_weight_velocity * measurement[1], 0.1 * measurement[2], 10 * self._std_weight_velocity * measurement[3]] covariance = np.diag(np.square(std)) return mean, covariance def predict(self, mean, covariance): """Run Kalman filter prediction step. Parameters ---------- mean : ndarray The 8 dimensional mean vector of the object state at the previous time step. covariance : ndarray The 8x8 dimensional covariance matrix of the object state at the previous time step. Returns ------- (ndarray, ndarray) Returns the mean vector and covariance matrix of the predicted state. Unobserved velocities are initialized to 0 mean. 
""" std_pos = [ self._std_weight_position * mean[0], self._std_weight_position * mean[1], 1 * mean[2], self._std_weight_position * mean[3]] std_vel = [ self._std_weight_velocity * mean[0], self._std_weight_velocity * mean[1], 0.1 * mean[2], self._std_weight_velocity * mean[3]] motion_cov = np.diag(np.square(np.r_[std_pos, std_vel])) mean = np.dot(self._motion_mat, mean) covariance = np.linalg.multi_dot(( self._motion_mat, covariance, self._motion_mat.T)) + motion_cov return mean, covariance def project(self, mean, covariance, confidence=.0): """Project state distribution to measurement space. Parameters ---------- mean : ndarray The state's mean vector (8 dimensional array). covariance : ndarray The state's covariance matrix (8x8 dimensional). confidence: (dyh) 检测框置信度 Returns ------- (ndarray, ndarray) Returns the projected mean and covariance matrix of the given state estimate. """ std = [ self._std_weight_position * mean[3], self._std_weight_position * mean[3], 1e-1, self._std_weight_position * mean[3]] std = [(1 - confidence) * x for x in std] innovation_cov = np.diag(np.square(std)) mean = np.dot(self._update_mat, mean) covariance = np.linalg.multi_dot(( self._update_mat, covariance, self._update_mat.T)) return mean, covariance + innovation_cov def update(self, mean, covariance, measurement, confidence=.0): """Run Kalman filter correction step. Parameters ---------- mean : ndarray The predicted state's mean vector (8 dimensional). covariance : ndarray The state's covariance matrix (8x8 dimensional). measurement : ndarray The 4 dimensional measurement vector (x, y, a, h), where (x, y) is the center position, a the aspect ratio, and h the height of the bounding box. confidence: (dyh)检测框置信度 Returns ------- (ndarray, ndarray) Returns the measurement-corrected state distribution. """ projected_mean, projected_cov = self.project(mean, covariance, confidence) chol_factor, lower = scipy.linalg.cho_factor( projected_cov, lower=True, check_finite=False) kalman_gain = scipy.linalg.cho_solve( (chol_factor, lower), np.dot(covariance, self._update_mat.T).T, check_finite=False).T innovation = measurement - projected_mean new_mean = mean + np.dot(innovation, kalman_gain.T) new_covariance = covariance - np.linalg.multi_dot(( kalman_gain, projected_cov, kalman_gain.T)) return new_mean, new_covariance def gating_distance(self, mean, covariance, measurements, only_position=False): """Compute gating distance between state distribution and measurements. A suitable distance threshold can be obtained from `chi2inv95`. If `only_position` is False, the chi-square distribution has 4 degrees of freedom, otherwise 2. Parameters ---------- mean : ndarray Mean vector over the state distribution (8 dimensional). covariance : ndarray Covariance of the state distribution (8x8 dimensional). measurements : ndarray An Nx4 dimensional matrix of N measurements, each in format (x, y, a, h) where (x, y) is the bounding box center position, a the aspect ratio, and h the height. only_position : Optional[bool] If True, distance computation is done with respect to the bounding box center position only. Returns ------- ndarray Returns an array of length N, where the i-th element contains the squared Mahalanobis distance between (mean, covariance) and `measurements[i]`. 
""" mean, covariance = self.project(mean, covariance) if only_position: mean, covariance = mean[:2], covariance[:2, :2] measurements = measurements[:, :2] cholesky_factor = np.linalg.cholesky(covariance) d = measurements - mean z = scipy.linalg.solve_triangular( cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True) squared_maha = np.sum(z * z, axis=0) return squared_maha ================================================ FILE: DLTA_AI_app/trackers/strongsort/sort/linear_assignment.py ================================================ # vim: expandtab:ts=4:sw=4 from __future__ import absolute_import import numpy as np from scipy.optimize import linear_sum_assignment from . import kalman_filter INFTY_COST = 1e+5 def min_cost_matching( distance_metric, max_distance, tracks, detections, track_indices=None, detection_indices=None): """Solve linear assignment problem. Parameters ---------- distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray The distance metric is given a list of tracks and detections as well as a list of N track indices and M detection indices. The metric should return the NxM dimensional cost matrix, where element (i, j) is the association cost between the i-th track in the given track indices and the j-th detection in the given detection_indices. max_distance : float Gating threshold. Associations with cost larger than this value are disregarded. tracks : List[track.Track] A list of predicted tracks at the current time step. detections : List[detection.Detection] A list of detections at the current time step. track_indices : List[int] List of track indices that maps rows in `cost_matrix` to tracks in `tracks` (see description above). detection_indices : List[int] List of detection indices that maps columns in `cost_matrix` to detections in `detections` (see description above). Returns ------- (List[(int, int)], List[int], List[int]) Returns a tuple with the following three entries: * A list of matched track and detection indices. * A list of unmatched track indices. * A list of unmatched detection indices. """ if track_indices is None: track_indices = np.arange(len(tracks)) if detection_indices is None: detection_indices = np.arange(len(detections)) if len(detection_indices) == 0 or len(track_indices) == 0: return [], track_indices, detection_indices # Nothing to match. cost_matrix = distance_metric( tracks, detections, track_indices, detection_indices) cost_matrix[cost_matrix > max_distance] = max_distance + 1e-5 row_indices, col_indices = linear_sum_assignment(cost_matrix) matches, unmatched_tracks, unmatched_detections = [], [], [] for col, detection_idx in enumerate(detection_indices): if col not in col_indices: unmatched_detections.append(detection_idx) for row, track_idx in enumerate(track_indices): if row not in row_indices: unmatched_tracks.append(track_idx) for row, col in zip(row_indices, col_indices): track_idx = track_indices[row] detection_idx = detection_indices[col] if cost_matrix[row, col] > max_distance: unmatched_tracks.append(track_idx) unmatched_detections.append(detection_idx) else: matches.append((track_idx, detection_idx)) return matches, unmatched_tracks, unmatched_detections def matching_cascade( distance_metric, max_distance, cascade_depth, tracks, detections, track_indices=None, detection_indices=None): """Run matching cascade. 
Parameters ---------- distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray The distance metric is given a list of tracks and detections as well as a list of N track indices and M detection indices. The metric should return the NxM dimensional cost matrix, where element (i, j) is the association cost between the i-th track in the given track indices and the j-th detection in the given detection indices. max_distance : float Gating threshold. Associations with cost larger than this value are disregarded. cascade_depth: int The cascade depth, should be set to the maximum track age. tracks : List[track.Track] A list of predicted tracks at the current time step. detections : List[detection.Detection] A list of detections at the current time step. track_indices : Optional[List[int]] List of track indices that maps rows in `cost_matrix` to tracks in `tracks` (see description above). Defaults to all tracks. detection_indices : Optional[List[int]] List of detection indices that maps columns in `cost_matrix` to detections in `detections` (see description above). Defaults to all detections. Returns ------- (List[(int, int)], List[int], List[int]) Returns a tuple with the following three entries: * A list of matched track and detection indices. * A list of unmatched track indices. * A list of unmatched detection indices. """ if track_indices is None: track_indices = list(range(len(tracks))) if detection_indices is None: detection_indices = list(range(len(detections))) unmatched_detections = detection_indices matches = [] track_indices_l = [ k for k in track_indices # if tracks[k].time_since_update == 1 + level ] matches_l, _, unmatched_detections = \ min_cost_matching( distance_metric, max_distance, tracks, detections, track_indices_l, unmatched_detections) matches += matches_l unmatched_tracks = list(set(track_indices) - set(k for k, _ in matches)) return matches, unmatched_tracks, unmatched_detections def gate_cost_matrix( cost_matrix, tracks, detections, track_indices, detection_indices, mc_lambda, gated_cost=INFTY_COST, only_position=False): """Invalidate infeasible entries in cost matrix based on the state distributions obtained by Kalman filtering. Each track's own Kalman filter (`track.kf`) is used for gating. Parameters ---------- cost_matrix : ndarray The NxM dimensional cost matrix, where N is the number of track indices and M is the number of detection indices, such that entry (i, j) is the association cost between `tracks[track_indices[i]]` and `detections[detection_indices[j]]`. tracks : List[track.Track] A list of predicted tracks at the current time step. detections : List[detection.Detection] A list of detections at the current time step. track_indices : List[int] List of track indices that maps rows in `cost_matrix` to tracks in `tracks` (see description above). detection_indices : List[int] List of detection indices that maps columns in `cost_matrix` to detections in `detections` (see description above). mc_lambda : float Weight for blending the appearance cost with the Mahalanobis (motion) distance. gated_cost : Optional[float] Entries in the cost matrix corresponding to infeasible associations are set to this value. Defaults to a very large value. only_position : Optional[bool] If True, only the x, y position of the state distribution is considered during gating. Defaults to False. Returns ------- ndarray Returns the modified cost matrix.
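Each row is additionally blended with the motion cost, i.e. `cost[row] = mc_lambda * cost[row] + (1 - mc_lambda) * gating_distance`, after infeasible entries have been set to `gated_cost`. Call sketch (illustrative names and `mc_lambda` value):: >>> gated = gate_cost_matrix(cost_matrix, tracks, detections, track_indices, detection_indices, mc_lambda=0.98)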
""" gating_dim = 2 if only_position else 4 gating_threshold = kalman_filter.chi2inv95[gating_dim] measurements = np.asarray( [detections[i].to_xyah() for i in detection_indices]) for row, track_idx in enumerate(track_indices): track = tracks[track_idx] gating_distance = track.kf.gating_distance(track.mean, track.covariance, measurements, only_position) cost_matrix[row, gating_distance > gating_threshold] = gated_cost cost_matrix[row] = mc_lambda * cost_matrix[row] + (1 - mc_lambda) * gating_distance return cost_matrix ================================================ FILE: DLTA_AI_app/trackers/strongsort/sort/nn_matching.py ================================================ # vim: expandtab:ts=4:sw=4 import numpy as np import sys import torch def _pdist(a, b): """Compute pair-wise squared distance between points in `a` and `b`. Parameters ---------- a : array_like An NxM matrix of N samples of dimensionality M. b : array_like An LxM matrix of L samples of dimensionality M. Returns ------- ndarray Returns a matrix of size len(a), len(b) such that eleement (i, j) contains the squared distance between `a[i]` and `b[j]`. """ a, b = np.asarray(a), np.asarray(b) if len(a) == 0 or len(b) == 0: return np.zeros((len(a), len(b))) a2, b2 = np.square(a).sum(axis=1), np.square(b).sum(axis=1) r2 = -2. * np.dot(a, b.T) + a2[:, None] + b2[None, :] r2 = np.clip(r2, 0., float(np.inf)) return r2 def _cosine_distance(a, b, data_is_normalized=False): """Compute pair-wise cosine distance between points in `a` and `b`. Parameters ---------- a : array_like An NxM matrix of N samples of dimensionality M. b : array_like An LxM matrix of L samples of dimensionality M. data_is_normalized : Optional[bool] If True, assumes rows in a and b are unit length vectors. Otherwise, a and b are explicitly normalized to lenght 1. Returns ------- ndarray Returns a matrix of size len(a), len(b) such that eleement (i, j) contains the squared distance between `a[i]` and `b[j]`. """ if not data_is_normalized: a = np.asarray(a) / np.linalg.norm(a, axis=1, keepdims=True) b = np.asarray(b) / np.linalg.norm(b, axis=1, keepdims=True) return 1. - np.dot(a, b.T) def _nn_euclidean_distance(x, y): """ Helper function for nearest neighbor distance metric (Euclidean). Parameters ---------- x : ndarray A matrix of N row-vectors (sample points). y : ndarray A matrix of M row-vectors (query points). Returns ------- ndarray A vector of length M that contains for each entry in `y` the smallest Euclidean distance to a sample in `x`. """ # x_ = torch.from_numpy(np.asarray(x) / np.linalg.norm(x, axis=1, keepdims=True)) # y_ = torch.from_numpy(np.asarray(y) / np.linalg.norm(y, axis=1, keepdims=True)) distances = distances = _pdist(x, y) return np.maximum(0.0, torch.min(distances, axis=0)[0].numpy()) def _nn_cosine_distance(x, y): """ Helper function for nearest neighbor distance metric (cosine). Parameters ---------- x : ndarray A matrix of N row-vectors (sample points). y : ndarray A matrix of M row-vectors (query points). Returns ------- ndarray A vector of length M that contains for each entry in `y` the smallest cosine distance to a sample in `x`. """ x_ = torch.from_numpy(np.asarray(x)) y_ = torch.from_numpy(np.asarray(y)) distances = _cosine_distance(x_, y_) distances = distances return distances.min(axis=0) class NearestNeighborDistanceMetric(object): """ A nearest neighbor distance metric that, for each target, returns the closest distance to any sample that has been observed so far. 
Parameters ---------- metric : str Either "euclidean" or "cosine". matching_threshold: float The matching threshold. Samples with larger distance are considered an invalid match. budget : Optional[int] If not None, fix samples per class to at most this number. Removes the oldest samples when the budget is reached. Attributes ---------- samples : Dict[int -> List[ndarray]] A dictionary that maps from target identities to the list of samples that have been observed so far. """ def __init__(self, metric, matching_threshold, budget=None): if metric == "euclidean": self._metric = _nn_euclidean_distance elif metric == "cosine": self._metric = _nn_cosine_distance else: raise ValueError( "Invalid metric; must be either 'euclidean' or 'cosine'") self.matching_threshold = matching_threshold self.budget = budget self.samples = {} def partial_fit(self, features, targets, active_targets): """Update the distance metric with new data. Parameters ---------- features : ndarray An NxM matrix of N features of dimensionality M. targets : ndarray An integer array of associated target identities. active_targets : List[int] A list of targets that are currently present in the scene. """ for feature, target in zip(features, targets): self.samples.setdefault(target, []).append(feature) if self.budget is not None: self.samples[target] = self.samples[target][-self.budget:] self.samples = {k: self.samples[k] for k in active_targets} def distance(self, features, targets): """Compute distance between features and targets. Parameters ---------- features : ndarray An NxM matrix of N features of dimensionality M. targets : List[int] A list of targets to match the given `features` against. Returns ------- ndarray Returns a cost matrix of shape len(targets), len(features), where element (i, j) contains the closest squared distance between `targets[i]` and `features[j]`. """ cost_matrix = np.zeros((len(targets), len(features))) for i, target in enumerate(targets): cost_matrix[i, :] = self._metric(self.samples[target], features) return cost_matrix ================================================ FILE: DLTA_AI_app/trackers/strongsort/sort/preprocessing.py ================================================ # vim: expandtab:ts=4:sw=4 import numpy as np import cv2 def non_max_suppression(boxes, max_bbox_overlap, scores=None): """Suppress overlapping detections. Original code from [1]_ has been adapted to include confidence score. .. [1] http://www.pyimagesearch.com/2015/02/16/ faster-non-maximum-suppression-python/ Examples -------- >>> boxes = [d.roi for d in detections] >>> scores = [d.confidence for d in detections] >>> indices = non_max_suppression(boxes, max_bbox_overlap, scores) >>> detections = [detections[i] for i in indices] Parameters ---------- boxes : ndarray Array of ROIs (x, y, width, height). max_bbox_overlap : float ROIs that overlap more than this value are suppressed. scores : Optional[array_like] Detector confidence score. Returns ------- List[int] Returns indices of detections that have survived non-maxima suppression.
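A concrete illustrative case (assumed values; boxes are `(x, y, w, h)`; box 1 overlaps box 0 by about 0.83 > 0.5 and is suppressed, so the survivors are `[0, 2]`):: >>> import numpy as np >>> boxes = np.array([[0., 0., 10., 10.], [1., 1., 10., 10.], [20., 20., 5., 5.]]) >>> scores = np.array([0.9, 0.8, 0.7]) >>> keep = non_max_suppression(boxes, max_bbox_overlap=0.5, scores=scores)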
""" if len(boxes) == 0: return [] boxes = boxes.astype(np.float) pick = [] x1 = boxes[:, 0] y1 = boxes[:, 1] x2 = boxes[:, 2] + boxes[:, 0] y2 = boxes[:, 3] + boxes[:, 1] area = (x2 - x1 + 1) * (y2 - y1 + 1) if scores is not None: idxs = np.argsort(scores) else: idxs = np.argsort(y2) while len(idxs) > 0: last = len(idxs) - 1 i = idxs[last] pick.append(i) xx1 = np.maximum(x1[i], x1[idxs[:last]]) yy1 = np.maximum(y1[i], y1[idxs[:last]]) xx2 = np.minimum(x2[i], x2[idxs[:last]]) yy2 = np.minimum(y2[i], y2[idxs[:last]]) w = np.maximum(0, xx2 - xx1 + 1) h = np.maximum(0, yy2 - yy1 + 1) overlap = (w * h) / area[idxs[:last]] idxs = np.delete( idxs, np.concatenate( ([last], np.where(overlap > max_bbox_overlap)[0]))) return pick ================================================ FILE: DLTA_AI_app/trackers/strongsort/sort/track.py ================================================ # vim: expandtab:ts=4:sw=4 import cv2 import numpy as np from trackers.strongsort.sort.kalman_filter import KalmanFilter from collections import deque class TrackState: """ Enumeration type for the single target track state. Newly created tracks are classified as `tentative` until enough evidence has been collected. Then, the track state is changed to `confirmed`. Tracks that are no longer alive are classified as `deleted` to mark them for removal from the set of active tracks. """ Tentative = 1 Confirmed = 2 Deleted = 3 class Track: """ A single target track with state space `(x, y, a, h)` and associated velocities, where `(x, y)` is the center of the bounding box, `a` is the aspect ratio and `h` is the height. Parameters ---------- mean : ndarray Mean vector of the initial state distribution. covariance : ndarray Covariance matrix of the initial state distribution. track_id : int A unique track identifier. n_init : int Number of consecutive detections before the track is confirmed. The track state is set to `Deleted` if a miss occurs within the first `n_init` frames. max_age : int The maximum number of consecutive misses before the track state is set to `Deleted`. feature : Optional[ndarray] Feature vector of the detection this track originates from. If not None, this feature is added to the `features` cache. Attributes ---------- mean : ndarray Mean vector of the initial state distribution. covariance : ndarray Covariance matrix of the initial state distribution. track_id : int A unique track identifier. hits : int Total number of measurement updates. age : int Total number of frames since first occurance. time_since_update : int Total number of frames since last measurement update. state : TrackState The current track state. features : List[ndarray] A cache of features. On each measurement update, the associated feature vector is added to this list. """ def __init__(self, detection, track_id, class_id, conf, n_init, max_age, ema_alpha, feature=None): self.track_id = track_id self.class_id = int(class_id) self.hits = 1 self.age = 1 self.time_since_update = 0 self.max_num_updates_wo_assignment = 7 self.updates_wo_assignment = 0 self.ema_alpha = ema_alpha self.state = TrackState.Tentative self.features = [] if feature is not None: feature /= np.linalg.norm(feature) self.features.append(feature) self.conf = conf self._n_init = n_init self._max_age = max_age self.kf = KalmanFilter() self.mean, self.covariance = self.kf.initiate(detection) # Initializing trajectory queue self.q = deque(maxlen=25) def to_tlwh(self): """Get current position in bounding box format `(top left x, top left y, width, height)`. 
Returns ------- ndarray The bounding box. """ ret = self.mean[:4].copy() ret[2] *= ret[3] ret[:2] -= ret[2:] / 2 return ret def to_tlbr(self): """Get kf estimated current position in bounding box format `(min x, min y, max x, max y)`. Returns ------- ndarray The predicted kf bounding box. """ ret = self.to_tlwh() ret[2:] = ret[:2] + ret[2:] return ret def ECC(self, src, dst, warp_mode = cv2.MOTION_EUCLIDEAN, eps = 1e-5, max_iter = 100, scale = 0.1, align = False): """Compute the warp matrix from src to dst. Parameters ---------- src : ndarray An NxM matrix of the source img (BGR or Gray); it must be the same format as dst. dst : ndarray An NxM matrix of the target img (BGR or Gray). warp_mode: flags of opencv translation: cv2.MOTION_TRANSLATION rotated and shifted: cv2.MOTION_EUCLIDEAN affine(shift,rotated,shear): cv2.MOTION_AFFINE homography(3d): cv2.MOTION_HOMOGRAPHY eps: float the threshold of the increment in the correlation coefficient between two iterations max_iter: int the maximum number of iterations. scale: float or [int, int] scale_ratio: float scale_size: [W, H] align: bool whether to apply the affine or perspective warp to the source image Returns ------- warp matrix : ndarray Returns the warp matrix from src to dst. If the motion model is homography, the warp matrix will be 3x3; otherwise 2x3. src_aligned: ndarray aligned source image of gray """ # BGR2GRAY if src.ndim == 3: # Convert images to grayscale src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY) # make the imgs smaller to speed up if scale is not None: if isinstance(scale, float) or isinstance(scale, int): if scale != 1: src_r = cv2.resize(src, (0, 0), fx = scale, fy = scale,interpolation = cv2.INTER_LINEAR) dst_r = cv2.resize(dst, (0, 0), fx = scale, fy = scale,interpolation = cv2.INTER_LINEAR) scale = [scale, scale] else: src_r, dst_r = src, dst scale = None else: if scale[0] != src.shape[1] and scale[1] != src.shape[0]: src_r = cv2.resize(src, (scale[0], scale[1]), interpolation = cv2.INTER_LINEAR) dst_r = cv2.resize(dst, (scale[0], scale[1]), interpolation=cv2.INTER_LINEAR) scale = [scale[0] / src.shape[1], scale[1] / src.shape[0]] else: src_r, dst_r = src, dst scale = None else: src_r, dst_r = src, dst # Define 2x3 or 3x3 matrices and initialize the matrix to identity if warp_mode == cv2.MOTION_HOMOGRAPHY : warp_matrix = np.eye(3, 3, dtype=np.float32) else : warp_matrix = np.eye(2, 3, dtype=np.float32) # Define termination criteria criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, max_iter, eps) # Run the ECC algorithm. The results are stored in warp_matrix.
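Before the actual call below, a self-contained mini-example of what cv2.findTransformECC estimates; the synthetic image and the translation amount are assumptions for illustration.

```python
import cv2
import numpy as np

rng = np.random.default_rng(0)
frame = cv2.GaussianBlur(rng.integers(0, 255, (120, 160)).astype(np.uint8), (15, 15), 5)
shifted = np.roll(frame, shift=(3, 5), axis=(0, 1))   # shift by 5 px in x, 3 px in y
warp = np.eye(2, 3, dtype=np.float32)
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 100, 1e-5)
cc, warp = cv2.findTransformECC(frame, shifted, warp,
                                cv2.MOTION_TRANSLATION, criteria, None, 1)
# warp[0, 2] and warp[1, 2] now hold the estimated x/y translation
# (up to sign convention), which camera_update() later applies to track boxes.
```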
try: (cc, warp_matrix) = cv2.findTransformECC (src_r, dst_r, warp_matrix, warp_mode, criteria, None, 1) except cv2.error as e: print('ecc transform failed') return None, None if scale is not None: warp_matrix[0, 2] = warp_matrix[0, 2] / scale[0] warp_matrix[1, 2] = warp_matrix[1, 2] / scale[1] if align: sz = src.shape if warp_mode == cv2.MOTION_HOMOGRAPHY: # Use warpPerspective for Homography src_aligned = cv2.warpPerspective(src, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR) else : # Use warpAffine for Translation, Euclidean and Affine src_aligned = cv2.warpAffine(src, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR) return warp_matrix, src_aligned else: return warp_matrix, None def get_matrix(self, matrix): eye = np.eye(3) dist = np.linalg.norm(eye - matrix) if dist < 100: return matrix else: return eye def camera_update(self, previous_frame, next_frame): warp_matrix, src_aligned = self.ECC(previous_frame, next_frame) if warp_matrix is None and src_aligned is None: return [a,b] = warp_matrix warp_matrix=np.array([a,b,[0,0,1]]) warp_matrix = warp_matrix.tolist() matrix = self.get_matrix(warp_matrix) x1, y1, x2, y2 = self.to_tlbr() x1_, y1_, _ = matrix @ np.array([x1, y1, 1]).T x2_, y2_, _ = matrix @ np.array([x2, y2, 1]).T w, h = x2_ - x1_, y2_ - y1_ cx, cy = x1_ + w / 2, y1_ + h / 2 self.mean[:4] = [cx, cy, w / h, h] def increment_age(self): self.age += 1 self.time_since_update += 1 def predict(self, kf): """Propagate the state distribution to the current time step using a Kalman filter prediction step. Parameters ---------- kf : kalman_filter.KalmanFilter The Kalman filter. """ self.mean, self.covariance = self.kf.predict(self.mean, self.covariance) self.age += 1 self.time_since_update += 1 def update_kf(self, bbox, confidence=0.5): self.updates_wo_assignment = self.updates_wo_assignment + 1 self.mean, self.covariance = self.kf.update(self.mean, self.covariance, bbox, confidence) tlbr = self.to_tlbr() x_c = int((tlbr[0] + tlbr[2]) / 2) y_c = int((tlbr[1] + tlbr[3]) / 2) self.q.append(('predupdate', (x_c, y_c))) def update(self, detection, class_id, conf): """Perform Kalman filter measurement update step and update the feature cache. Parameters ---------- detection : Detection The associated detection. """ self.conf = conf self.class_id = class_id.int() self.mean, self.covariance = self.kf.update(self.mean, self.covariance, detection.to_xyah(), detection.confidence) feature = detection.feature / np.linalg.norm(detection.feature) smooth_feat = self.ema_alpha * self.features[-1] + (1 - self.ema_alpha) * feature smooth_feat /= np.linalg.norm(smooth_feat) self.features = [smooth_feat] self.hits += 1 self.time_since_update = 0 if self.state == TrackState.Tentative and self.hits >= self._n_init: self.state = TrackState.Confirmed tlbr = self.to_tlbr() x_c = int((tlbr[0] + tlbr[2]) / 2) y_c = int((tlbr[1] + tlbr[3]) / 2) self.q.append(('observationupdate', (x_c, y_c))) def mark_missed(self): """Mark this track as missed (no association at the current time step). """ if self.state == TrackState.Tentative: self.state = TrackState.Deleted elif self.time_since_update > self._max_age: self.state = TrackState.Deleted def is_tentative(self): """Returns True if this track is tentative (unconfirmed). 
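A short sketch of the exponential moving average that update() above applies to appearance embeddings; the dimensionality and alpha mirror the defaults here, while the vectors themselves are random placeholders.

```python
import numpy as np

ema_alpha = 0.9
prev = np.random.rand(128); prev /= np.linalg.norm(prev)   # running track feature
new = np.random.rand(128);  new /= np.linalg.norm(new)     # feature of the matched detection
smooth = ema_alpha * prev + (1 - ema_alpha) * new
smooth /= np.linalg.norm(smooth)   # keep the embedding on the unit sphere
```

Because the smoothed vector replaces the whole feature cache, each track carries exactly one embedding, which keeps the nearest-neighbor metric's per-target sample list small.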
""" return self.state == TrackState.Tentative def is_confirmed(self): """Returns True if this track is confirmed.""" return self.state == TrackState.Confirmed def is_deleted(self): """Returns True if this track is dead and should be deleted.""" return self.state == TrackState.Deleted ================================================ FILE: DLTA_AI_app/trackers/strongsort/sort/tracker.py ================================================ # vim: expandtab:ts=4:sw=4 from __future__ import absolute_import import numpy as np from . import kalman_filter from . import linear_assignment from . import iou_matching from . import detection from .track import Track class Tracker: """ This is the multi-target tracker. Parameters ---------- metric : nn_matching.NearestNeighborDistanceMetric A distance metric for measurement-to-track association. max_age : int Maximum number of missed misses before a track is deleted. n_init : int Number of consecutive detections before the track is confirmed. The track state is set to `Deleted` if a miss occurs within the first `n_init` frames. Attributes ---------- metric : nn_matching.NearestNeighborDistanceMetric The distance metric used for measurement to track association. max_age : int Maximum number of missed misses before a track is deleted. n_init : int Number of frames that a track remains in initialization phase. kf : kalman_filter.KalmanFilter A Kalman filter to filter target trajectories in image space. tracks : List[Track] The list of active tracks at the current time step. """ GATING_THRESHOLD = np.sqrt(kalman_filter.chi2inv95[4]) def __init__(self, metric, max_iou_dist=0.9, max_age=30, max_unmatched_preds=7, n_init=3, _lambda=0, ema_alpha=0.9, mc_lambda=0.995): self.metric = metric self.max_iou_dist = max_iou_dist self.max_age = max_age self.n_init = n_init self._lambda = _lambda self.ema_alpha = ema_alpha self.mc_lambda = mc_lambda self.max_unmatched_preds = max_unmatched_preds self.kf = kalman_filter.KalmanFilter() self.tracks = [] self._next_id = 1 def predict(self): """Propagate track state distributions one time step forward. This function should be called once every time step, before `update`. """ for track in self.tracks: track.predict(self.kf) def increment_ages(self): for track in self.tracks: track.increment_age() track.mark_missed() def camera_update(self, previous_img, current_img): for track in self.tracks: track.camera_update(previous_img, current_img) def pred_n_update_all_tracks(self): """Perform predictions and updates for all tracks by its own predicted state. """ self.predict() for t in self.tracks: if self.max_unmatched_preds != 0 and t.updates_wo_assignment < t.max_num_updates_wo_assignment: bbox = t.to_tlwh() t.update_kf(detection.to_xyah_ext(bbox)) def update(self, detections, classes, confidences): """Perform measurement update and track management. Parameters ---------- detections : List[deep_sort.detection.Detection] A list of detections at the current time step. """ # Run matching cascade. matches, unmatched_tracks, unmatched_detections = \ self._match(detections) # Update track set. 
for track_idx, detection_idx in matches: self.tracks[track_idx].update( detections[detection_idx], classes[detection_idx], confidences[detection_idx]) for track_idx in unmatched_tracks: self.tracks[track_idx].mark_missed() if self.max_unmatched_preds != 0 and self.tracks[track_idx].updates_wo_assignment < self.tracks[track_idx].max_num_updates_wo_assignment: bbox = self.tracks[track_idx].to_tlwh() self.tracks[track_idx].update_kf(detection.to_xyah_ext(bbox)) for detection_idx in unmatched_detections: self._initiate_track(detections[detection_idx], classes[detection_idx].item(), confidences[detection_idx].item()) self.tracks = [t for t in self.tracks if not t.is_deleted()] # Update distance metric. active_targets = [t.track_id for t in self.tracks if t.is_confirmed()] features, targets = [], [] for track in self.tracks: if not track.is_confirmed(): continue features += track.features targets += [track.track_id for _ in track.features] self.metric.partial_fit(np.asarray(features), np.asarray(targets), active_targets) def _full_cost_metric(self, tracks, dets, track_indices, detection_indices): """ This implements the full lambda-based cost-metric. However, in doing so, it disregards the possibility to gate the position only which is provided by linear_assignment.gate_cost_matrix(). Instead, I gate by everything. Note that the Mahalanobis distance is itself an unnormalised metric. Given the cosine distance being normalised, we employ a quick and dirty normalisation based on the threshold: that is, we divide the positional-cost by the gating threshold, thus ensuring that the valid values range 0-1. Note also that the authors work with the squared distance. I also sqrt this, so that it is more intuitive in terms of values. """ # Compute First the Position-based Cost Matrix pos_cost = np.empty([len(track_indices), len(detection_indices)]) msrs = np.asarray([dets[i].to_xyah() for i in detection_indices]) for row, track_idx in enumerate(track_indices): pos_cost[row, :] = np.sqrt( self.kf.gating_distance( tracks[track_idx].mean, tracks[track_idx].covariance, msrs, False ) ) / self.GATING_THRESHOLD pos_gate = pos_cost > 1.0 # Now Compute the Appearance-based Cost Matrix app_cost = self.metric.distance( np.array([dets[i].feature for i in detection_indices]), np.array([tracks[i].track_id for i in track_indices]), ) app_gate = app_cost > self.metric.matching_threshold # Now combine and threshold cost_matrix = self._lambda * pos_cost + (1 - self._lambda) * app_cost cost_matrix[np.logical_or(pos_gate, app_gate)] = linear_assignment.INFTY_COST # Return Matrix return cost_matrix def _match(self, detections): def gated_metric(tracks, dets, track_indices, detection_indices): features = np.array([dets[i].feature for i in detection_indices]) targets = np.array([tracks[i].track_id for i in track_indices]) cost_matrix = self.metric.distance(features, targets) cost_matrix = linear_assignment.gate_cost_matrix(cost_matrix, tracks, dets, track_indices, detection_indices, self.mc_lambda) return cost_matrix # Split track set into confirmed and unconfirmed tracks. confirmed_tracks = [ i for i, t in enumerate(self.tracks) if t.is_confirmed()] unconfirmed_tracks = [ i for i, t in enumerate(self.tracks) if not t.is_confirmed()] # Associate confirmed tracks using appearance features. 
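A worked numeric example of the gating-and-blending logic in _full_cost_metric above; the cost values are made up, and INFTY_COST is assumed to be the usual 1e5 from linear_assignment.

```python
import numpy as np

INFTY_COST = 1e5            # assumption: mirrors linear_assignment.INFTY_COST
lam = 0.0                   # the class default _lambda=0: appearance-only blend
pos_cost = np.array([[0.3, 1.2]])    # already divided by GATING_THRESHOLD
app_cost = np.array([[0.10, 0.15]])
gate = (pos_cost > 1.0) | (app_cost > 0.2)   # 0.2 ~ the matching threshold
cost = lam * pos_cost + (1 - lam) * app_cost
cost[gate] = INFTY_COST
print(cost)   # [[1.e-01 1.e+05]] -- the gated pair can never be assigned
```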
matches_a, unmatched_tracks_a, unmatched_detections = \ linear_assignment.matching_cascade( gated_metric, self.metric.matching_threshold, self.max_age, self.tracks, detections, confirmed_tracks) # Associate remaining tracks together with unconfirmed tracks using IOU. iou_track_candidates = unconfirmed_tracks + [ k for k in unmatched_tracks_a if self.tracks[k].time_since_update == 1] unmatched_tracks_a = [ k for k in unmatched_tracks_a if self.tracks[k].time_since_update != 1] matches_b, unmatched_tracks_b, unmatched_detections = \ linear_assignment.min_cost_matching( iou_matching.iou_cost, self.max_iou_dist, self.tracks, detections, iou_track_candidates, unmatched_detections) matches = matches_a + matches_b unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b)) return matches, unmatched_tracks, unmatched_detections def _initiate_track(self, detection, class_id, conf): self.tracks.append(Track( detection.to_xyah(), self._next_id, class_id, conf, self.n_init, self.max_age, self.ema_alpha, detection.feature)) self._next_id += 1 ================================================ FILE: DLTA_AI_app/trackers/strongsort/strong_sort.py ================================================ import numpy as np import torch import sys import cv2 import gdown from os.path import exists as file_exists, join import torchvision.transforms as transforms from .sort.nn_matching import NearestNeighborDistanceMetric from .sort.detection import Detection from .sort.tracker import Tracker from .reid_multibackend import ReIDDetectMultiBackend from ultralytics.yolo.utils.ops import xyxy2xywh class StrongSORT(object): def __init__(self, model_weights, device, fp16, max_dist=0.2, max_iou_dist=0.7, max_age=70, max_unmatched_preds=7, n_init=3, nn_budget=100, mc_lambda=0.995, ema_alpha=0.9 ): self.model = ReIDDetectMultiBackend(weights=model_weights, device=device, fp16=fp16) self.max_dist = max_dist metric = NearestNeighborDistanceMetric( "cosine", self.max_dist, nn_budget) self.tracker = Tracker( metric, max_iou_dist=max_iou_dist, max_age=max_age, n_init=n_init, max_unmatched_preds=max_unmatched_preds, mc_lambda=mc_lambda, ema_alpha=ema_alpha) def update(self, dets, ori_img): xyxys = dets[:, 0:4] confs = dets[:, 4] clss = dets[:, 5] classes = clss.numpy() xywhs = xyxy2xywh(xyxys.numpy()) confs = confs.numpy() self.height, self.width = ori_img.shape[:2] # generate detections features = self._get_features(xywhs, ori_img) bbox_tlwh = self._xywh_to_tlwh(xywhs) detections = [Detection(bbox_tlwh[i], conf, features[i]) for i, conf in enumerate(confs)] # prepare boxes and scores for non-maximum suppression boxes = np.array([d.tlwh for d in detections]) scores = np.array([d.confidence for d in detections]) # update tracker self.tracker.predict() self.tracker.update(detections, clss, confs) # output bbox identities outputs = [] for track in self.tracker.tracks: if not track.is_confirmed() or track.time_since_update > 1: continue box = track.to_tlwh() x1, y1, x2, y2 = self._tlwh_to_xyxy(box) track_id = track.track_id class_id = track.class_id conf = track.conf queue = track.q outputs.append(np.array([x1, y1, x2, y2, track_id, class_id, conf, queue], dtype=object)) if len(outputs) > 0: outputs = np.stack(outputs, axis=0) return outputs """ TODO: Convert bbox from xc_yc_w_h to xtl_ytl_w_h Thanks JieChen91@github.com for reporting this bug!
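A hedged end-to-end sketch of driving StrongSORT; the ReID weights file name and the (x1, y1, x2, y2, conf, cls) detection layout are assumptions, and real weights are needed for ReIDDetectMultiBackend to load.

```python
import numpy as np
import torch

tracker = StrongSORT(model_weights='osnet_x0_25_msmt17.pt',   # hypothetical weights path
                     device='cpu', fp16=False)
frame = np.zeros((480, 640, 3), dtype=np.uint8)               # stand-in BGR image
dets = torch.tensor([[100., 50., 180., 220., 0.91, 0.]])      # one person detection
outputs = tracker.update(dets, frame)
# Rows appear only once a track is confirmed (n_init consecutive hits);
# each row is (x1, y1, x2, y2, track_id, class_id, conf, trajectory deque).
```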
""" @staticmethod def _xywh_to_tlwh(bbox_xywh): if isinstance(bbox_xywh, np.ndarray): bbox_tlwh = bbox_xywh.copy() elif isinstance(bbox_xywh, torch.Tensor): bbox_tlwh = bbox_xywh.clone() bbox_tlwh[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2] / 2. bbox_tlwh[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3] / 2. return bbox_tlwh def _xywh_to_xyxy(self, bbox_xywh): x, y, w, h = bbox_xywh x1 = max(int(x - w / 2), 0) x2 = min(int(x + w / 2), self.width - 1) y1 = max(int(y - h / 2), 0) y2 = min(int(y + h / 2), self.height - 1) return x1, y1, x2, y2 def _tlwh_to_xyxy(self, bbox_tlwh): """ TODO: Convert bbox from xtl_ytl_w_h to xc_yc_w_h Thanks JieChen91@github.com for reporting this bug! """ x, y, w, h = bbox_tlwh x1 = max(int(x), 0) x2 = min(int(x+w), self.width - 1) y1 = max(int(y), 0) y2 = min(int(y+h), self.height - 1) return x1, y1, x2, y2 def increment_ages(self): self.tracker.increment_ages() def _xyxy_to_tlwh(self, bbox_xyxy): x1, y1, x2, y2 = bbox_xyxy t = x1 l = y1 w = int(x2 - x1) h = int(y2 - y1) return t, l, w, h def _get_features(self, bbox_xywh, ori_img): im_crops = [] for box in bbox_xywh: x1, y1, x2, y2 = self._xywh_to_xyxy(box) im = ori_img[y1:y2, x1:x2] im_crops.append(im) if im_crops: features = self.model(im_crops) else: features = np.array([]) return features def trajectory(self, im0, q, color): # Add rectangle to image (PIL-only) for i, p in enumerate(q): thickness = int(np.sqrt(float (i + 1)) * 1.5) if p[0] == 'observationupdate': cv2.circle(im0, p[1], 2, color=color, thickness=thickness) else: cv2.circle(im0, p[1], 2, color=(255,255,255), thickness=thickness) ================================================ FILE: DLTA_AI_app/trackers/strongsort/utils/__init__.py ================================================ ================================================ FILE: DLTA_AI_app/trackers/strongsort/utils/asserts.py ================================================ from os import environ def assert_in(file, files_to_check): if file not in files_to_check: raise AssertionError("{} does not exist in the list".format(str(file))) return True def assert_in_env(check_list: list): for item in check_list: assert_in(item, environ.keys()) return True ================================================ FILE: DLTA_AI_app/trackers/strongsort/utils/draw.py ================================================ import numpy as np import cv2 palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1) def compute_color_for_labels(label): """ Simple function that adds fixed color depending on the class """ color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette] return tuple(color) def draw_boxes(img, bbox, identities=None, offset=(0,0)): for i,box in enumerate(bbox): x1,y1,x2,y2 = [int(i) for i in box] x1 += offset[0] x2 += offset[0] y1 += offset[1] y2 += offset[1] # box text and bar id = int(identities[i]) if identities is not None else 0 color = compute_color_for_labels(id) label = '{}{:d}'.format("", id) t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2 , 2)[0] cv2.rectangle(img,(x1, y1),(x2,y2),color,3) cv2.rectangle(img,(x1, y1),(x1+t_size[0]+3,y1+t_size[1]+4), color,-1) cv2.putText(img,label,(x1,y1+t_size[1]+4), cv2.FONT_HERSHEY_PLAIN, 2, [255,255,255], 2) return img if __name__ == '__main__': for i in range(82): print(compute_color_for_labels(i)) ================================================ FILE: DLTA_AI_app/trackers/strongsort/utils/evaluation.py ================================================ import os import numpy as np import copy import motmetrics as mm mm.lap.default_solver = 'lap' from 
utils.io import read_results, unzip_objs class Evaluator(object): def __init__(self, data_root, seq_name, data_type): self.data_root = data_root self.seq_name = seq_name self.data_type = data_type self.load_annotations() self.reset_accumulator() def load_annotations(self): assert self.data_type == 'mot' gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt') self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True) self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True) def reset_accumulator(self): self.acc = mm.MOTAccumulator(auto_id=True) def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False): # results trk_tlwhs = np.copy(trk_tlwhs) trk_ids = np.copy(trk_ids) # gts gt_objs = self.gt_frame_dict.get(frame_id, []) gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2] # ignore boxes ignore_objs = self.gt_ignore_frame_dict.get(frame_id, []) ignore_tlwhs = unzip_objs(ignore_objs)[0] # remove ignored results keep = np.ones(len(trk_tlwhs), dtype=bool) iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5) if len(iou_distance) > 0: match_is, match_js = mm.lap.linear_sum_assignment(iou_distance) match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js]) match_ious = iou_distance[match_is, match_js] match_js = np.asarray(match_js, dtype=int) match_js = match_js[np.logical_not(np.isnan(match_ious))] keep[match_js] = False trk_tlwhs = trk_tlwhs[keep] trk_ids = trk_ids[keep] # get distance matrix iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5) # acc self.acc.update(gt_ids, trk_ids, iou_distance) if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'): events = self.acc.last_mot_events # only supported by https://github.com/longcw/py-motmetrics else: events = None return events def eval_file(self, filename): self.reset_accumulator() result_frame_dict = read_results(filename, self.data_type, is_gt=False) frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys()))) for frame_id in frames: trk_objs = result_frame_dict.get(frame_id, []) trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2] self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False) return self.acc @staticmethod def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')): names = copy.deepcopy(names) if metrics is None: metrics = mm.metrics.motchallenge_metrics metrics = copy.deepcopy(metrics) mh = mm.metrics.create() summary = mh.compute_many( accs, metrics=metrics, names=names, generate_overall=True ) return summary @staticmethod def save_summary(summary, filename): import pandas as pd writer = pd.ExcelWriter(filename) summary.to_excel(writer) writer.close() ================================================ FILE: DLTA_AI_app/trackers/strongsort/utils/io.py ================================================ import os from typing import Dict import numpy as np # from utils.log import get_logger def write_results(filename, results, data_type): if data_type == 'mot': save_format = '{frame},{id},{x1},{y1},{w},{h},-1,-1,-1,-1\n' elif data_type == 'kitti': save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n' else: raise ValueError(data_type) with open(filename, 'w') as f: for frame_id, tlwhs, track_ids in results: if data_type == 'kitti': frame_id -= 1 for tlwh, track_id in zip(tlwhs, track_ids): if track_id < 0: continue x1, y1, w, h = tlwh x2, y2 = x1 + w, y1 + h line
= save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h) f.write(line) # def write_results(filename, results_dict: Dict, data_type: str): # if not filename: # return # path = os.path.dirname(filename) # if not os.path.exists(path): # os.makedirs(path) # if data_type in ('mot', 'mcmot', 'lab'): # save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n' # elif data_type == 'kitti': # save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n' # else: # raise ValueError(data_type) # with open(filename, 'w') as f: # for frame_id, frame_data in results_dict.items(): # if data_type == 'kitti': # frame_id -= 1 # for tlwh, track_id in frame_data: # if track_id < 0: # continue # x1, y1, w, h = tlwh # x2, y2 = x1 + w, y1 + h # line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h, score=1.0) # f.write(line) # logger.info('Save results to {}'.format(filename)) def read_results(filename, data_type: str, is_gt=False, is_ignore=False): if data_type in ('mot', 'lab'): read_fun = read_mot_results else: raise ValueError('Unknown data type: {}'.format(data_type)) return read_fun(filename, is_gt, is_ignore) """ labels={'ped', ... % 1 'person_on_vhcl', ... % 2 'car', ... % 3 'bicycle', ... % 4 'mbike', ... % 5 'non_mot_vhcl', ... % 6 'static_person', ... % 7 'distractor', ... % 8 'occluder', ... % 9 'occluder_on_grnd', ... %10 'occluder_full', ... % 11 'reflection', ... % 12 'crowd' ... % 13 }; """ def read_mot_results(filename, is_gt, is_ignore): valid_labels = {1} ignore_labels = {2, 7, 8, 12} results_dict = dict() if os.path.isfile(filename): with open(filename, 'r') as f: for line in f.readlines(): linelist = line.split(',') if len(linelist) < 7: continue fid = int(linelist[0]) if fid < 1: continue results_dict.setdefault(fid, list()) if is_gt: if 'MOT16-' in filename or 'MOT17-' in filename: label = int(float(linelist[7])) mark = int(float(linelist[6])) if mark == 0 or label not in valid_labels: continue score = 1 elif is_ignore: if 'MOT16-' in filename or 'MOT17-' in filename: label = int(float(linelist[7])) vis_ratio = float(linelist[8]) if label not in ignore_labels and vis_ratio >= 0: continue else: continue score = 1 else: score = float(linelist[6]) tlwh = tuple(map(float, linelist[2:6])) target_id = int(linelist[1]) results_dict[fid].append((tlwh, target_id, score)) return results_dict def unzip_objs(objs): if len(objs) > 0: tlwhs, ids, scores = zip(*objs) else: tlwhs, ids, scores = [], [], [] tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4) return tlwhs, ids, scores ================================================ FILE: DLTA_AI_app/trackers/strongsort/utils/json_logger.py ================================================ """ References: https://medium.com/analytics-vidhya/creating-a-custom-logging-mechanism-for-real-time-object-detection-using-tdd-4ca2cfcd0a2f """ import json from os import makedirs from os.path import exists, join from datetime import datetime class JsonMeta(object): HOURS = 3 MINUTES = 59 SECONDS = 59 PATH_TO_SAVE = 'LOGS' DEFAULT_FILE_NAME = 'remaining' class BaseJsonLogger(object): """ This is the base class that returns __dict__ of its own it also returns the dicts of objects in the attributes that are list instances """ def dic(self): # returns dicts of objects out = {} for k, v in self.__dict__.items(): if hasattr(v, 'dic'): out[k] = v.dic() elif isinstance(v, list): out[k] = self.list(v) else: out[k] = v return out @staticmethod def list(values): # 
applies the dic method on items in the list return [v.dic() if hasattr(v, 'dic') else v for v in values] class Label(BaseJsonLogger): """ For each bounding box there are various categories with confidences. Label class keeps track of that information. """ def __init__(self, category: str, confidence: float): self.category = category self.confidence = confidence class Bbox(BaseJsonLogger): """ This module stores the information for each bounding box and is used by JsonParser Attributes: labels (list): List of label module. top (int): left (int): width (int): height (int): Args: bbox_id (float): top (int): left (int): width (int): height (int): References: Check Label module for better understanding. """ def __init__(self, bbox_id, top, left, width, height): self.labels = [] self.bbox_id = bbox_id self.top = top self.left = left self.width = width self.height = height def add_label(self, category, confidence): # adds category and confidence only if top_k is not exceeded. self.labels.append(Label(category, confidence)) def labels_full(self, value): return len(self.labels) == value class Frame(BaseJsonLogger): """ This module stores the information for each frame and is used by JsonParser Attributes: timestamp (float): The elapsed time of captured frame frame_id (int): The frame number of the captured video bboxes (list of Bbox objects): Stores the list of bbox objects. References: Check Bbox class for better information Args: timestamp (float): frame_id (int): """ def __init__(self, frame_id: int, timestamp: float = None): self.frame_id = frame_id self.timestamp = timestamp self.bboxes = [] def add_bbox(self, bbox_id: int, top: int, left: int, width: int, height: int): bboxes_ids = [bbox.bbox_id for bbox in self.bboxes] if bbox_id not in bboxes_ids: self.bboxes.append(Bbox(bbox_id, top, left, width, height)) else: raise ValueError("Frame with id: {} already has a Bbox with id: {}".format(self.frame_id, bbox_id)) def add_label_to_bbox(self, bbox_id: int, category: str, confidence: float): bboxes = {bbox.bbox_id: bbox for bbox in self.bboxes} if bbox_id in bboxes.keys(): res = bboxes.get(bbox_id) res.add_label(category, confidence) else: raise ValueError('the bbox with id: {} does not exist!'.format(bbox_id)) class BboxToJsonLogger(BaseJsonLogger): """ This module is designed to automate the task of logging jsons. An example json is used to show the contents of json file shortly Example: { "video_details": { "frame_width": 1920, "frame_height": 1080, "frame_rate": 20, "video_name": "/home/gpu/codes/MSD/pedestrian_2/project/public/camera1.avi" }, "frames": [ { "frame_id": 329, "timestamp": 3365.1254 "bboxes": [ { "labels": [ { "category": "pedestrian", "confidence": 0.9 } ], "bbox_id": 0, "top": 1257, "left": 138, "width": 68, "height": 109 } ] }], Attributes: frames (dict): It's a dictionary that maps each frame_id to json attributes. video_details (dict): information about video file. top_k_labels (int): shows the allowed number of labels start_time (datetime object): we use it to automate the json output by time.
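A small sketch of assembling one frame record with the classes above (ids and coordinates are illustrative):

```python
frame = Frame(frame_id=1, timestamp=0.033)
frame.add_bbox(bbox_id=0, top=120, left=80, width=60, height=140)
frame.add_label_to_bbox(bbox_id=0, category='pedestrian', confidence=0.93)
print(frame.dic())   # nested dict, ready for json.dump
```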
Args: top_k_labels (int): shows the allowed number of labels """ def __init__(self, top_k_labels: int = 1): self.frames = {} self.video_details = dict(frame_width=None, frame_height=None, frame_rate=None, video_name=None) self.top_k_labels = top_k_labels self.start_time = datetime.now() def set_top_k(self, value): self.top_k_labels = value def frame_exists(self, frame_id: int) -> bool: """ Args: frame_id (int): Returns: bool: true if frame_id is recognized """ return frame_id in self.frames.keys() def add_frame(self, frame_id: int, timestamp: float = None) -> None: """ Args: frame_id (int): timestamp (float): opencv captured frame time property Raises: ValueError: if frame_id already exists in the frames attribute Returns: None """ if not self.frame_exists(frame_id): self.frames[frame_id] = Frame(frame_id, timestamp) else: raise ValueError("Frame id: {} already exists".format(frame_id)) def bbox_exists(self, frame_id: int, bbox_id: int) -> bool: """ Args: frame_id: bbox_id: Returns: bool: if bbox exists in frame bboxes list """ bboxes = [] if self.frame_exists(frame_id=frame_id): bboxes = [bbox.bbox_id for bbox in self.frames[frame_id].bboxes] return bbox_id in bboxes def find_bbox(self, frame_id: int, bbox_id: int): """ Args: frame_id: bbox_id: Returns: bbox_id (int): Raises: ValueError: if bbox_id does not exist in the bbox list of specific frame. """ if not self.bbox_exists(frame_id, bbox_id): raise ValueError("frame with id: {} does not contain bbox with id: {}".format(frame_id, bbox_id)) bboxes = {bbox.bbox_id: bbox for bbox in self.frames[frame_id].bboxes} return bboxes.get(bbox_id) def add_bbox_to_frame(self, frame_id: int, bbox_id: int, top: int, left: int, width: int, height: int) -> None: """ Args: frame_id (int): bbox_id (int): top (int): left (int): width (int): height (int): Returns: None Raises: ValueError: if bbox_id already exists in frame information with frame_id ValueError: if frame_id does not exist in frames attribute """ if self.frame_exists(frame_id): frame = self.frames[frame_id] if not self.bbox_exists(frame_id, bbox_id): frame.add_bbox(bbox_id, top, left, width, height) else: raise ValueError( "frame with frame_id: {} already contains the bbox with id: {} ".format(frame_id, bbox_id)) else: raise ValueError("frame with frame_id: {} does not exist".format(frame_id)) def add_label_to_bbox(self, frame_id: int, bbox_id: int, category: str, confidence: float): """ Args: frame_id: bbox_id: category: confidence: the confidence value returned from yolo detection Returns: None Raises: ValueError: if the labels quota (top_k_labels) is exceeded. """ bbox = self.find_bbox(frame_id, bbox_id) if not bbox.labels_full(self.top_k_labels): bbox.add_label(category, confidence) else: raise ValueError("labels in frame_id: {}, bbox_id: {} are full".format(frame_id, bbox_id)) def add_video_details(self, frame_width: int = None, frame_height: int = None, frame_rate: int = None, video_name: str = None): self.video_details['frame_width'] = frame_width self.video_details['frame_height'] = frame_height self.video_details['frame_rate'] = frame_rate self.video_details['video_name'] = video_name def output(self): output = {'video_details': self.video_details} result = list(self.frames.values()) output['frames'] = [item.dic() for item in result] return output def json_output(self, output_name): """ Args: output_name: Returns: None Notes: It creates the json output with `output_name` name.
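Putting the logger together, a hedged usage sketch that reproduces the example JSON from the class docstring (the output file name is an assumption):

```python
logger = BboxToJsonLogger(top_k_labels=1)
logger.add_video_details(frame_width=1920, frame_height=1080,
                         frame_rate=20, video_name='camera1.avi')
logger.add_frame(frame_id=329, timestamp=3365.1254)
logger.add_bbox_to_frame(frame_id=329, bbox_id=0, top=1257, left=138,
                         width=68, height=109)
logger.add_label_to_bbox(frame_id=329, bbox_id=0,
                         category='pedestrian', confidence=0.9)
logger.json_output('camera1_tracks')   # writes camera1_tracks.json
```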
""" if not output_name.endswith('.json'): output_name += '.json' with open(output_name, 'w') as file: json.dump(self.output(), file) file.close() def set_start(self): self.start_time = datetime.now() def schedule_output_by_time(self, output_dir=JsonMeta.PATH_TO_SAVE, hours: int = 0, minutes: int = 0, seconds: int = 60) -> None: """ Notes: Creates folder and then periodically stores the jsons on that address. Args: output_dir (str): the directory where output files will be stored hours (int): minutes (int): seconds (int): Returns: None """ end = datetime.now() interval = 0 interval += abs(min([hours, JsonMeta.HOURS]) * 3600) interval += abs(min([minutes, JsonMeta.MINUTES]) * 60) interval += abs(min([seconds, JsonMeta.SECONDS])) diff = (end - self.start_time).seconds if diff > interval: output_name = self.start_time.strftime('%Y-%m-%d %H-%M-%S') + '.json' if not exists(output_dir): makedirs(output_dir) output = join(output_dir, output_name) self.json_output(output_name=output) self.frames = {} self.start_time = datetime.now() def schedule_output_by_frames(self, frames_quota, frame_counter, output_dir=JsonMeta.PATH_TO_SAVE): """ saves as the number of frames quota increases higher. :param frames_quota: :param frame_counter: :param output_dir: :return: """ pass def flush(self, output_dir): """ Notes: We use this function to output jsons whenever possible. like the time that we exit the while loop of opencv. Args: output_dir: Returns: None """ filename = self.start_time.strftime('%Y-%m-%d %H-%M-%S') + '-remaining.json' output = join(output_dir, filename) self.json_output(output_name=output) ================================================ FILE: DLTA_AI_app/trackers/strongsort/utils/log.py ================================================ import logging def get_logger(name='root'): formatter = logging.Formatter( # fmt='%(asctime)s [%(levelname)s]: %(filename)s(%(funcName)s:%(lineno)s) >> %(message)s') fmt='%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S') handler = logging.StreamHandler() handler.setFormatter(formatter) logger = logging.getLogger(name) logger.setLevel(logging.INFO) logger.addHandler(handler) return logger ================================================ FILE: DLTA_AI_app/trackers/strongsort/utils/parser.py ================================================ import os import yaml from easydict import EasyDict as edict class YamlParser(edict): """ This is yaml parser based on EasyDict. """ def __init__(self, cfg_dict=None, config_file=None): if cfg_dict is None: cfg_dict = {} if config_file is not None: assert(os.path.isfile(config_file)) with open(config_file, 'r') as fo: yaml_ = yaml.load(fo.read(), Loader=yaml.FullLoader) cfg_dict.update(yaml_) super(YamlParser, self).__init__(cfg_dict) def merge_from_file(self, config_file): with open(config_file, 'r') as fo: yaml_ = yaml.load(fo.read(), Loader=yaml.FullLoader) self.update(yaml_) def merge_from_dict(self, config_dict): self.update(config_dict) def get_config(config_file=None): return YamlParser(config_file=config_file) if __name__ == "__main__": cfg = YamlParser(config_file="../configs/yolov3.yaml") cfg.merge_from_file("../configs/strong_sort.yaml") import ipdb ipdb.set_trace() ================================================ FILE: DLTA_AI_app/trackers/strongsort/utils/tools.py ================================================ from functools import wraps from time import time def is_video(ext: str): """ Returns true if ext exists in allowed_exts for video files. 
Args: ext: Returns: """ allowed_exts = ('.mp4', '.webm', '.ogg', '.avi', '.wmv', '.mkv', '.3gp') return any((ext.endswith(x) for x in allowed_exts)) def tik_tok(func): """ keep track of time for each process. Args: func: Returns: """ @wraps(func) def _time_it(*args, **kwargs): start = time() try: return func(*args, **kwargs) finally: end_ = time() print("time: {:.03f}s, fps: {:.03f}".format(end_ - start, 1 / (end_ - start))) return _time_it ================================================ FILE: LICENSE ================================================ GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. 
If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. 
A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. 
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. 
c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). 
The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>

This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode:

<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box".

You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <https://www.gnu.org/philosophy/why-not-lgpl.html>.

================================================ FILE: MANIFEST.in ================================================ recursive-include . * ================================================ FILE: README.md ================================================


Data Labeling, Tracking and Annotation with AI

DLTA-AI is the next generation of annotation tools, integrating the power of state-of-the-art (SOTA) Computer Vision models into Labelme in a seamless experience and an intuitive workflow, making the creation of image datasets easier than ever before.

[![User Guide](https://img.shields.io/badge/User%20Guide-blue)](https://0ssamaak0.github.io/DLTA-AI/) [![Youtube Channel](https://img.shields.io/youtube/channel/views/UCJi8OFF-fzwGSAC8BWJ0BhQ)](https://www.youtube.com/@DLTA-AI) [![Discord Server](https://img.shields.io/discord/1130938906616004638)](https://discord.gg/9Rmwcnc4) [![PyPI - Downloads](https://img.shields.io/pypi/dm/DLTA-AI)](https://pypi.org/project/DLTA-AI/) [![GitHub release (latest by date including pre-releases)](https://img.shields.io/github/v/release/0ssamaak0/DLTA-AI?include_prereleases)](https://github.com/0ssamaak0/DLTA-AI/releases) [![GitHub issues](https://img.shields.io/github/issues/0ssamaak0/DLTA-AI)](https://github.com/0ssamaak0/DLTA-AI/issues) [![GitHub last commit](https://img.shields.io/github/last-commit/0ssamaak0/DLTA-AI)](https://github.com/0ssamaak0/DLTA-AI/commits) [![GitHub License](https://img.shields.io/github/license/0ssamaak0/DLTA-AI)](https://github.com/0ssamaak0/DLTA-AI/blob/master/LICENSE)

![gif_main](https://github.com/0ssamaak0/DLTA-AI/blob/master/assets/gif_main.gif?raw=true)

[Installation](#installation-%EF%B8%8F) 🛠️ | [Segment Anything](#Segment-Anything-) 🪄 | [Model Selection](#model-selection-) 🤖 | [Segmentation](#segmentation-) 🎨 | [Object Tracking](#object-tracking-) 🚗 | [Export](#export-) 📤 | [Other Features](#other-features-) 🌟| [Contributing](#contributing-) 🤝| [Acknowledgements](#acknowledgements-)🙏| [Resources](#resources-) 🌐 | [License](#license-) 📜
# Installation 🛠️

After creating a new environment and installing PyTorch in it, you can install DLTA-AI using pip

```
pip install DLTA-AI
```

and run it using

```
DLTA-AI
```

Check the [Installation section in the User Guide](https://0ssamaak0.github.io/DLTA-AI/installation/full-installation/) for more details, different installation options, and solutions to common issues.

# Segment Anything 🪄

DLTA-AI takes annotation to the next level by integrating Meta's latest model, [Segment Anything (SAM)](https://github.com/facebookresearch/segment-anything), to support zero-shot segmentation for any class.

**SAM** can also be used to improve the quality of segmentation: even an inaccurate polygon around an object is enough for it to be segmented correctly.

**SAM** isn't limited to segmentation tasks; it is built into video mode to support **Object Tracking** as well, for any class.
![Segment Anything](https://github.com/0ssamaak0/DLTA-AI/blob/master/assets/SAM.gif?raw=true)
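To give a feel for what a single SAM prompt looks like, below is a minimal sketch using the public `segment-anything` API directly. This illustrates the underlying library rather than DLTA-AI's own code; the checkpoint filename, image path, and click coordinates are placeholders.

```
import cv2
import numpy as np
from segment_anything import SamPredictor, sam_model_registry

# Load a SAM checkpoint (placeholder path; DLTA-AI downloads checkpoints for you)
sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b_01ec64.pth")
predictor = SamPredictor(sam)

# Embed the image once, then prompt it as many times as you like
image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)
predictor.set_image(image)

# A single foreground click (label 1) is enough for a zero-shot mask
masks, scores, _ = predictor.predict(
    point_coords=np.array([[320, 240]]),
    point_labels=np.array([1]),
    multimask_output=True,
)
best_mask = masks[np.argmax(scores)]  # boolean HxW mask
```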
# Model Selection 🤖

For model selection, DLTA-AI provides the **Model Explorer** to utilize the power of the numerous models in [mmdetection](https://github.com/open-mmlab/mmdetection/tree/2.x) and [ultralytics YOLOv8](https://github.com/ultralytics/ultralytics), as well as the [SAM](https://github.com/facebookresearch/segment-anything) models, giving the user the ability to compare, download and select from the whole library of models.
![Model Explorer](https://github.com/0ssamaak0/DLTA-AI/blob/master/assets/model_explorer.png?raw=true)
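Under the hood, the explorer browses the model zoos of these backends. As a rough illustration (the model name is only an example), running a YOLOv8 segmentation model through the ultralytics API looks like this:

```
from ultralytics import YOLO

# Weights are downloaded automatically on first use
model = YOLO("yolov8n-seg.pt")
results = model("example.jpg")  # list of Results with boxes and masks
```

On the mmdetection side, checkpoints are typically fetched with openmim, e.g. `mim download mmdet --config mask_rcnn_r50_fpn_1x_coco --dest ./checkpoints` (the config name again being just an example).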
# Segmentation 🎨

Using the models from the **Model Explorer**, DLTA-AI provides a seamless experience for annotating a single image or a batch of images, with options to select classes, modify thresholds, and full control to edit the segmentation results.
![Segmentation](https://github.com/0ssamaak0/DLTA-AI/blob/master/assets/segmentation.png?raw=true)
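For reference, a single inference call with the mmdetection 2.x API that such tooling builds on looks roughly like this (config and checkpoint paths are placeholders, and this is not DLTA-AI's internal code):

```
from mmdet.apis import init_detector, inference_detector

# Placeholder paths; the Model Explorer downloads matching config/checkpoint pairs
config = "configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py"
checkpoint = "checkpoints/mask_rcnn_r50_fpn_1x_coco.pth"

model = init_detector(config, checkpoint, device="cuda:0")
result = inference_detector(model, "example.jpg")
# For instance segmentation models, result is (bbox_results, segm_results),
# one entry per class; each bbox row ends with a confidence score.
```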
And as mentioned before, **SAM** is fully integrated into DLTA-AI to provide zero-shot segmentation for any class and to improve the quality of segmentation.

# Object Tracking 🚗

Built on top of the segmentation and detection models, DLTA-AI provides a complete solution for Object Tracking, with 5 different tracking models to choose from. DLTA-AI has options for video navigation, tracking settings and different visualization options, with the ability to export the tracking results to a video file.

Besides this, DLTA-AI provides a completely new way to modify the tracking results, including edit and delete propagation across frames.
![Object Tracking](https://github.com/0ssamaak0/DLTA-AI/blob/master/assets/tracking.gif?raw=true)
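Conceptually, exporting tracking results to a video reduces to drawing each frame's boxes and IDs and writing the frames out. A minimal OpenCV sketch, where `tracks` is a hypothetical mapping from frame index to `(id, box, label)` tuples, not DLTA-AI's internal structure:

```
import cv2

def export_tracking_video(video_path, tracks, out_path="tracking.mp4", fps=30):
    """Draw per-frame (id, box, label) tuples and write an annotated video."""
    cap = cv2.VideoCapture(video_path)
    ok, frame = cap.read()
    height, width = frame.shape[:2]
    writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"),
                             fps, (width, height))
    frame_idx = 0
    while ok:
        for obj_id, (x1, y1, x2, y2), label in tracks.get(frame_idx, []):
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.putText(frame, f"{label} #{obj_id}", (x1, max(y1 - 5, 15)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        writer.write(frame)
        ok, frame = cap.read()
        frame_idx += 1
    cap.release()
    writer.release()
```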
Besides the automatic tracking models, DLTA-AI provides different methods of interpolation for filling gaps between frames, to fix occlusions and unpredicted behaviors in a semi-automatic way.
![Interpolation](https://github.com/0ssamaak0/DLTA-AI/blob/master/assets/interpolation.png?raw=true)
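The simplest such method, linear interpolation, blends an object's bounding box between two annotated keyframes. A minimal sketch of the idea (the SAM-based interpolation DLTA-AI also offers is not shown):

```
import numpy as np

def interpolate_boxes(box_a, box_b, n_missing):
    """Linearly interpolate n_missing boxes between two keyframe boxes.

    box_a, box_b: [x1, y1, x2, y2] at the keyframes surrounding the gap.
    Returns one interpolated box per missing frame.
    """
    box_a, box_b = np.asarray(box_a, float), np.asarray(box_b, float)
    # t runs strictly between 0 and 1, excluding the keyframes themselves
    return [((1 - t) * box_a + t * box_b).tolist()
            for t in np.linspace(0, 1, n_missing + 2)[1:-1]]

# Fill a 3-frame gap between two keyframes
print(interpolate_boxes([0, 0, 10, 10], [40, 0, 50, 10], 3))
```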
# Export 📤

For Instance Segmentation, DLTA-AI provides the option to export the segmentation results to the standard COCO format, the tracking results to the MOT format, and the tracking results to a video file with the desired visualization options, e.g., show id, bbox, class name, etc.
![Export](https://github.com/0ssamaak0/DLTA-AI/blob/master/assets/Export.png?raw=true)
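For orientation, the standard MOT text format stores one object per line as `frame, id, bb_left, bb_top, bb_width, bb_height, conf, x, y, z`, with the 3D world coordinates set to -1 for 2D tracking; the exact column conventions of DLTA-AI's export may differ slightly. A few illustrative lines:

```
1,1,794.27,247.59,71.25,174.88,1,-1,-1,-1
1,2,164.15,406.52,87.28,201.55,1,-1,-1,-1
2,1,797.76,243.91,71.25,174.84,1,-1,-1,-1
```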
DLTA-AI also provides the ability to add user-defined (custom) export formats that can be used for any purpose; once users define their own format, it becomes available in the export menu.

# Other Features 🌟

- Threshold Selection (Confidence and IoU)
- Select Classes (from 80 COCO classes) with the option to save default classes
- Track assigned objects only
- Merging models (run both models and merge the results)
- Show Runtime Type (CPU/GPU)
- Show GPU Memory Usage
- Video Navigation (Frame by Frame, Fast Forward, Fast Backward, Play/Pause)
- Light / Dark Theme Support (syncs with OS theme)
- Fully Customizable UI (drag and drop, show/hide)
- OS Notifications (for long-running tasks)
- Using orjson for faster JSON serialization
- Additional script (external) to evaluate the results of segmentation (COCO)
- Additional script (external) to extract frames from a video file for future use
- User shortcuts and preferences settings

# Contributing 🤝

DLTA-AI is an open-source project and contributions are very welcome, especially in this early stage of development. You can contribute in many ways:

- Create an [issue](https://github.com/0ssamaak0/DLTA-AI/issues) reporting bugs 🐞, suggesting new features 🌟, or just giving your feedback 📝
- Create a [pull request](https://github.com/0ssamaak0/DLTA-AI/pulls) to fix bugs or add new features, or just to improve code quality, optimize performance, improve documentation, or even just fix typos
- Review [pull requests](https://github.com/0ssamaak0/DLTA-AI/pulls) and help with the code review process
- Spread the word about DLTA-AI and help us grow the community 🌎, by sharing the project on social media, or just by telling your friends about it

# Acknowledgements 🙏

This tool is part of a Graduation Project at the [Faculty of Engineering, Ain Shams University](https://eng.asu.edu.eg/) under the supervision of:

- [Dr. Karim Ismail](https://carleton.ca/cee/profile/karim-ismail/)
- [Dr. Ahmed Osama](mailto:ahmed.osama@eng.asu.edu.eg)
- Dr. Watheq El-Kharashy
- [Eng. Yousra El-Qattan](https://www.linkedin.com/in/youssra-elqattan/)

We also want to thank our friends who helped us with testing, feedback and suggestions:

- [Eng. Houssam Siyoufi](https://www.linkedin.com/in/houssam-siyoufi-163627110/)
- [Amin Mohamed](https://github.com/AminMohamed-3)
- [Badr Mohamed](https://github.com/Badr-1)
- [Ahmed Mahmoud](https://github.com/ahmedd-mahmoudd)
- [Youssef Ashraf](https://github.com/0xNine9)
- [Chadi Ashraf](https://github.com/Chady00)

# Resources 🌐

- [Labelme](https://github.com/wkentaro/labelme)
- [Segment Anything (SAM)](https://github.com/facebookresearch/segment-anything)
- [MMDetection](https://github.com/open-mmlab/mmdetection/tree/2.x)
- [ultralytics YOLOv8](https://github.com/ultralytics/ultralytics)
- [mikelbrostrom yolov8_tracking](https://github.com/mikel-brostrom/yolov8_tracking)
- [orjson](https://github.com/ijl/orjson)
- [icons8](https://icons8.com/)

# License 📜

DLTA-AI is released under the [GPLv3 license](https://github.com/0ssamaak0/DLTA-AI/blob/master/LICENSE).

================================================ FILE: additional_scripts/coco_eval.py ================================================
import sys
import os
import json
import argparse

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval


# Define a class to suppress print statements
class HiddenPrints:
    """
    A context manager to suppress print statements.
""" def __enter__(self): self._original_stdout = sys.stdout sys.stdout = open(os.devnull, 'w') def __exit__(self, exc_type, exc_val, exc_tb): sys.stdout.close() sys.stdout = self._original_stdout # Define the function to evaluate coco def evaluate_coco(gt_file: str, pred_file: str, task: str = "bbox", evaluation_type: str = "full") -> None: """ Evaluates the performance of a COCO object detection model. Args: gt_file (str): Path to the ground truth file. pred_file (str): Path to the prediction file. task (str, optional): The type of task to evaluate (bbox or segm). Defaults to "bbox". evaluation_type (str, optional): The type of evaluation to perform (full or mAP). Defaults to "full". """ # Use HiddenPrints to suppress print statements with HiddenPrints(): # Load the ground truth file coco_gt = COCO(gt_file) # Load the prediction file with open(pred_file, 'r') as f: pred_file = json.load(f) pred_file = pred_file[0]['annotations'] # type: ignore coco_dt = coco_gt.loadRes(pred_file) # Create a COCO evaluator object coco_eval = COCOeval(coco_gt, coco_dt, task) # type: ignore # Evaluate the model coco_eval.evaluate() coco_eval.accumulate() # Compute stats coco_eval.summarize() # Print the results based on the evaluation type if evaluation_type == "full": coco_eval.summarize() elif evaluation_type == "mAP": print(f"{task} mAP: {coco_eval.stats[0]:.3f}") # Create an argument parser parser = argparse.ArgumentParser() parser.add_argument("--gt_file", required=True, help="ground truth file") parser.add_argument("--pred_file", required=True, help="prediction file") parser.add_argument("--task", default="bbox", choices=["bbox", "segm"], help="task (bbox or segm)") parser.add_argument("--evaluation_type", default="full", choices=["full", "mAP"], help="evaluation type (full or mAP)") # Parse the arguments args = parser.parse_args() # Run the function with the arguments evaluate_coco(args.gt_file, args.pred_file, args.task, args.evaluation_type) ================================================ FILE: docs/Installation/executable.md ================================================ --- label: Executable (CPU Only) order: 2 icon: ":package:" --- DLTA-AI is available as an executable, however it's CPU only, so it's not recommended for large datasets. It's currently available for windows and linux only you can download the [lastest release](https://github.com/0ssamaak0/DLTA-AI/releases) Executable under **Assets** The Executable doesn't require any installation, just download and run it from the executable file ![Executable image in file explorer](https://github.com/0ssamaak0/DLTA-AI/raw/master/docs/assets/exe.png?raw=true) ================================================ FILE: docs/Installation/full installation.md ================================================ --- label: full installation icon: ":inbox_tray:" order: 3 --- # Full Installation ## Create a Virtual Environment It is highly recommended to install DTLA-AI in virtual environment using conda. This will ensure a clean and isolated environment for the installation process. use `python=3.8` to avoid any compatibility issues ``` conda create -n DLTA-AI python=3.8 conda activate DLTA-AI ``` ## Install Pytorch First, you need to install [pytorch](https://pytorch.org/get-started/locally/) according to your device and your OS, if you have GPU, choose CUDA version, otherwise choose CPU version Example: ``` conda install pytorch torchvision torchaudio .... -c pytorch> ``` !!! 
Due to an inconsistency between the current versions of `pytorch` and `mmcv`, some users may face issues when using `pytorch 2.x`. If you face any issues, try installing `pytorch 1.13.1` instead.
!!!

## Option 1: Using pip

Installation using pip is easier since it handles all dependencies

```
pip install DLTA-AI
```

then run it from anywhere using

```
DLTA-AI
```

Note that the first time you run DLTA-AI, it will download a required module, which may take some time.

You can also use pip to update DLTA-AI

```
pip install DLTA-AI -U
```

## Option 2: Manual Installation

Download the latest release from [here](https://github.com/0ssamaak0/DLTA-AI/releases)

Install the requirements

```
pip install -r requirements.txt
mim install mmcv-full==1.7.0
```

then run the tool from the `DLTA_AI_app` directory

```
cd DLTA_AI_app
python __main__.py
```

================================================ FILE: docs/Installation/index.yml ================================================
label: Installation
icon: ":hammer_and_wrench:"
expanded: true
order: 50

================================================ FILE: docs/Installation/problems.md ================================================
---
label: possible problems
order: 1
icon: ":interrobang:"
---

# Solutions to possible problems

## Qt Platform Plugin Error in OpenCV on Linux Machines 🐧

Some Linux machines may have this problem

```
Could not load the Qt platform plugin "xcb" in "/home//miniconda3/envs/test/lib/python3.8/site-packages/cv2/qt/plugins" even though it was found.
This application failed to start because no Qt platform plugin could be initialized. Reinstalling the application may fix this problem.

Available platform plugins are: xcb, eglfs, linuxfb, minimal, minimalegl, offscreen, vnc, wayland-egl, wayland, wayland-xcomposite-egl, wayland-xcomposite-glx, webgl.
```

It can be solved simply by installing opencv-headless

```
pip3 install opencv-python-headless
```

## Microsoft Visual C++ Build Tools Error When Installing MMDetection on Windows Machines 🪟

Some Windows machines may have this problem when installing **mmdet**

```
Building wheel for pycocotools (setup.py) ... error
...
error: Microsoft Visual C++ 14.0 or greater is required. Get it with "Microsoft C++ Build Tools": https://visualstudio.microsoft.com/visual-cpp-build-tools/
```

You can try

```
conda install -c conda-forge pycocotools
```

or just use the Visual Studio Installer to install `MSVC v143 - VS 2022 C++ x64/x86 build tools (Latest)`

## Problem in installing mmcv-full

You may often get stuck installing `mmcv-full` with this message

```
Building wheels for collected packages: mmcv-full
  Building wheel for mmcv-full (setup.py) ...
```

You can try installing [pytorch 1.13.1](https://pytorch.org/get-started/previous-versions/#v1131) instead of the latest version; you can also refer to [this issue](https://github.com/open-mmlab/mmcv/issues/1386)

## Multiple copies of the OpenMP runtime have been linked into the program

You may encounter this problem

```
OMP: Error #15: Initializing libiomp5md.dll, but found libiomp5md.dll already initialized.
OMP: Hint This means that multiple copies of the OpenMP runtime have been linked into the program. That is dangerous, since it can degrade performance or cause incorrect results. The best thing to do is to ensure that only a single OpenMP runtime is linked into the process, e.g. by avoiding static linking of the OpenMP runtime in any library.
As an unsafe, unsupported, undocumented workaround you can set the environment variable KMP_DUPLICATE_LIB_OK=TRUE to allow the program to continue to execute, but that may cause crashes or silently produce incorrect results. For more information, please see http://www.intel.com/software/products/support/.
```

You can solve this by installing a compatible numpy version

```
pip install numpy==1.23.3
```

Thanks to [mohamedraafat96's issue](https://github.com/0ssamaak0/DLTA-AI/issues/52); you can check this [stackoverflow answer](https://stackoverflow.com/questions/64209238/error-15-initializing-libiomp5md-dll-but-found-libiomp5md-dll-already-initial) for more details

================================================ FILE: docs/index.md ================================================
---
label: "DLTA-AI User Guide"
icon: "assets/icon.png"
---

# DLTA-AI User Guide

![DLTA-AI Preview](https://github.com/0ssamaak0/DLTA-AI/raw/master/assets/gif_main.gif?raw=true)

Data Labeling, Tracking and Annotation with AI.

DLTA-AI is the next generation of annotation tools, integrating the power of state-of-the-art (SOTA) Computer Vision models into Labelme in a seamless experience and an intuitive workflow, making the creation of image datasets easier than ever before.

## Why DLTA-AI?

This open-source, customizable annotation tool was created to fill a gap in annotation tools. Customization and giving the user full control was, and will remain, our priority: from model selection, input formats and inference parameters, to export formats and even the User Interface itself. With these options, the goal is to extend the concept of an annotation tool to use cases beyond just preparing datasets to train models.

## Features

- Easy and straightforward installation process, with support for all Operating Systems
- User Guide with detailed tutorials for all the features
- Full support of Auto Annotation with different models
- Different annotation options and parameters (e.g., Thresholds)
- Export to (literally) any format
- Modern and functional User Interface
- Dedicated Video Mode
- Object Tracking support
- Completely free and open source, and will always be

## Contributing

DLTA-AI is an open-source project and contributions are very welcome. You can contribute in many ways:

- Create an [issue](https://github.com/0ssamaak0/DLTA-AI/issues) reporting bugs 🐞, suggesting new features 🌟, or just giving your feedback 📝
- Create a [pull request](https://github.com/0ssamaak0/DLTA-AI/pulls) to fix bugs or add new features, or just to improve code quality, optimize performance, improve documentation, or even just fix typos
- Review [pull requests](https://github.com/0ssamaak0/DLTA-AI/pulls) and help with the code review process
- Spread the word about DLTA-AI and help us grow the community 🌎, by sharing the project on social media, or just by telling your friends about it

## Resources

- [Labelme](https://github.com/wkentaro/labelme)
- [Segment Anything (SAM)](https://github.com/facebookresearch/segment-anything)
- [MMDetection](https://github.com/open-mmlab/mmdetection/tree/2.x)
- [ultralytics YOLOv8](https://github.com/ultralytics/ultralytics)
- [mikelbrostrom yolov8_tracking](https://github.com/mikel-brostrom/yolov8_tracking)
- [orjson](https://github.com/ijl/orjson)
- [icons8](https://icons8.com/)

================================================ FILE: docs/main_features/Export.md ================================================
---
icon: ":outbox_tray:"
order: 3
---

# Export

This page is under construction 🚧, please check back later.

================================================ FILE: docs/main_features/SAM.md ================================================
---
icon: https://github.com/0ssamaak0/DLTA-AI/blob/master/DLTA_AI_app/labelme/icons/SAM.png?raw=true
order: 10
---

# Segment Anything (SAM)

META AI's model [Segment Anything](https://segment-anything.com/), or `SAM`, is integrated into DLTA-AI in many ways to increase the accuracy of the annotation process, in a very native user experience with almost zero installation effort.

## Installation

Like all other models, the [Model Explorer](../model_selection/model_explorer.md) can be used to install the checkpoints directly with just a single click

[!embed](https://youtu.be/8g15M9bE1uA?t=5)

## Segmentation

Segment Anything can be used to perform Zero-Shot Segmentation of any object.
DLTA-AI provides an experience similar to the [Original Demo](https://segment-anything.com/demo#), with a simple SAM toolbar that supports user-customized shortcuts and runs locally on the user's machine, on any image or video.

[!embed](https://youtu.be/8g15M9bE1uA?t=41)

## Enhance Polygons

Besides the usual Zero-Shot Segmentation functionality, Segment Anything can be used to enhance the accuracy of any polygon, whether it was created by the user or by any other model, by simply selecting the polygon(s) and enhancing them from the toolbar or the context menu.

[!embed](https://youtu.be/8g15M9bE1uA?t=84)

## Interpolation Tracking

DLTA-AI utilizes the power of Segment Anything to provide very accurate interpolation tracking, which can be used to track any object in a video, including multiple objects at the same time.

[!embed](https://youtu.be/8g15M9bE1uA?t=116)

================================================ FILE: docs/main_features/index.yml ================================================
label: Main Features
icon: ":star2:"
expanded: true
order: 40

================================================ FILE: docs/main_features/inputs.md ================================================
---
icon: ":clapper:"
order: 8
---

# Input Modes

DLTA-AI provides different options for inputs

## Image Mode

Image mode is very simple: just open an image and start annotating

[!embed](https://youtu.be/zkm_GhX8OtM?t=6)

## Directory Mode

Directory mode is used to annotate a directory of images; it's very useful when you have a dataset of images and you want to annotate them all at once. Note that it shows all images within the directory and all subdirectories.

[!embed](https://youtu.be/zkm_GhX8OtM?t=17)

## Video Mode

Video mode is used to annotate a video, and provides an integrated video player that allows you to navigate, play, pause, go forward or backward, and jump to a specific frame.

[!embed](https://youtu.be/zkm_GhX8OtM?t=41)

## Video as Frames

You can open a video as a directory of frames; this is useful when you want to annotate only some frames of a video. You have the option to set the start and end frame, and also the sampling rate, i.e., the step between frames.

[!embed](https://youtu.be/zkm_GhX8OtM?t=71)

================================================ FILE: docs/main_features/segmentation.md ================================================
---
icon: ":art:"
order: 9
---

# Segmentation

Instance Segmentation is one of the major features in DLTA-AI. From the huge model library to the different options and parameters, to the ability to apply manual edits to the results, DLTA-AI provides a fully customizable and easy to use segmentation experience.
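The two thresholds described in the Thresholds section below amount to a confidence filter followed by Non Maximum Suppression. A minimal sketch of that post-processing, not DLTA-AI's actual implementation:

```
import numpy as np

def iou(a, b):
    """IoU of two [x1, y1, x2, y2] boxes."""
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    area = lambda r: (r[2] - r[0]) * (r[3] - r[1])
    return inter / (area(a) + area(b) - inter + 1e-9)

def filter_predictions(boxes, scores, conf_thresh=0.3, iou_thresh=0.5):
    """Confidence filter, then greedy NMS: keep the highest-scoring boxes."""
    order = [i for i in np.argsort(scores)[::-1] if scores[i] >= conf_thresh]
    keep = []
    for i in order:
        if all(iou(boxes[i], boxes[j]) < iou_thresh for j in keep):
            keep.append(i)
    return keep
```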
## Model Selection

Model selection can be done directly by selecting a segmentation model from the menu, or by selecting a model from the huge library of models in the [Model Explorer](../model_selection/model_explorer.md)

[!embed](https://youtu.be/bYjy82Ug2wU?t=10)

## Inferencing

The model can be run on the current image only (works in all [input modes](inputs.md)) or on all images (directory mode only)

[!embed](https://youtu.be/bYjy82Ug2wU?t=28)

## Visualization Options

You can select the visualization options from the menu, such as showing the segmentation mask or just the bounding box, as well as the class name and the confidence score

[!embed](https://youtu.be/bYjy82Ug2wU?t=60)

## Select Classes

You can select some classes among the 80 [COCO classes](https://cocodataset.org/). You can select them for just this session (forgotten when you close DLTA-AI) or set them as default classes (saved when you close DLTA-AI)

[!embed](https://youtu.be/bYjy82Ug2wU?t=84)

## Thresholds

To give the annotator full control and the ability to choose the optimum point in the precision/recall tradeoff, DLTA-AI provides 2 thresholding options

### Confidence Threshold

The confidence threshold is very simple: by just typing the threshold value or setting it through the slider, all predictions with confidence less than the threshold will be ignored

[!embed](https://youtu.be/bYjy82Ug2wU?t=112)

### IOU Threshold (For Non Maximum Suppression)

DLTA-AI internally applies Non Maximum Suppression (NMS) to the predictions to remove overlapping predictions; the IOU threshold is the threshold used in NMS.

[!embed](https://youtu.be/bYjy82Ug2wU?t=150)

================================================ FILE: docs/main_features/tracking/index.yml ================================================
label: Tracking
icon: https://github.com/0ssamaak0/DLTA-AI/blob/master/DLTA_AI_app/labelme/icons/tracking.png?raw=true
expanded: true
order: 4

================================================ FILE: docs/main_features/tracking/interpolation.md ================================================
---
icon:
order: 1
---

# Interpolation Tracking

## Interpolation Method

### Linear Interpolation

### SAM Interpolation

## Interpolation Between

### Selected Keyframes

### Detected Frames

================================================ FILE: docs/main_features/tracking/tracking.md ================================================
---
icon:
order: 2
---

# Tracking

## Model Selection

## Tracking Options

## Visualization Options

## Edit propagation

## Delete Options

================================================ FILE: docs/model_selection/index.yml ================================================
label: Model Selection
icon: ":robot_face:"
expanded: true
order: 30

================================================ FILE: docs/model_selection/merge.md ================================================
---
icon: https://github.com/0ssamaak0/DLTA-AI/blob/master/DLTA_AI_app/labelme/icons/merge.png?raw=true
order: 5
---

# Merge Models

This page is under construction 🚧, please check back later.

================================================ FILE: docs/model_selection/model_explorer.md ================================================
---
icon: https://github.com/0ssamaak0/DLTA-AI/blob/master/DLTA_AI_app/labelme/icons/checklist.png?raw=true
order: 10
---

# Model Explorer

This page is under construction 🚧, please check back later.

================================================ FILE: docs/retype.yml ================================================
input: .
output: .retype
url: # Add your website address here
branding:
  title: DLTA-AI User Guide
  logo: assets/icon.png
  colors:
    label:
      text: "#ffffff"
      background: "#ff0000"
favicon: assets/icon.png
links:
  - text: GitHub
    link: https://github.com/0ssamaak0/DLTA-AI
    icon: mark-github
  - text: Issues
    link: https://github.com/0ssamaak0/DLTA-AI/issues
    icon: bug
  - text: Release Notes
    link: https://github.com/0ssamaak0/DLTA-AI/releases
    icon: note
  - text: Youtube Channel
    link: https://www.youtube.com/@DLTA-AI
    icon: video
footer:
  copyright: "DLTA-AI is licensed under GPLv3 License"
  links:
    - text: License
      link: https://github.com/0ssamaak0/DLTA-AI/blob/master/LICENSE
      icon: law

================================================ FILE: docs/user_interface.md ================================================
---
label: User Interface
icon: https://github.com/0ssamaak0/DLTA-AI/blob/master/DLTA_AI_app/labelme/icons/UI.png?raw=true
order: 1
---

This page is under construction 🚧, please check back later.

================================================ FILE: releasenotes.md ================================================
# New Features 🌟
-

# Bug Fixes 🐞
- Closing the save dialog without saving annotations no longer enables the export button #45

================================================ FILE: requirements.txt ================================================
PyQt6==6.6.0
QtPy==2.3.1
termcolor==2.2.0
imgviz==1.7.2
opencv-python==4.7.0.72
pyqtdarktheme==2.1.0
supervision==0.3.2
gdown==4.7.1
ultralytics==8.0.61
onemetric==0.1.2
easydict==1.10
openmim==0.3.2
mmdet==2.25.2
scikit-image==0.20.0
filterpy==1.4.5
segment-anything==1.0
lap==0.4.0
orjson==3.8.12
notify-py==0.3.42
psutil==5.9.4
shapely==2.0.1
screeninfo==0.8.1

================================================ FILE: setup.py ================================================
import setuptools

with open("README.md", "r", encoding="utf8") as fh:
    long_description = fh.read()

with open("requirements.txt", "r") as f:
    requirements = f.read().splitlines()

# Read the package version from the labelme package
__version__ = "0.0.0"
with open("DLTA_AI_app/labelme/__init__.py", "r") as f:
    for line in f.readlines():
        if line.startswith("__version__"):
            __version__ = line.split("=")[1].strip().strip('"')
            break

setuptools.setup(
    name="DLTA-AI",
    version=f"{__version__}",
    author="0ssamaak0",
    author_email="0ssamaak0@gmail.com",
    description="DLTA-AI is the next generation of annotation tools, integrating the power of Computer Vision SOTA models into Labelme in a seamless experience and intuitive workflow to make creating image datasets easier than ever before",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/0ssamaak0/DLTA-AI",
    package_dir={"DLTA_AI_app": "DLTA_AI_app"},
    python_requires='>=3.8',
    install_requires=requirements,
    package_data={"": ["*"]},
    license="GPLv3",
    entry_points={
        "console_scripts": [
            "DLTA-AI=DLTA_AI_app.__main__:main"
        ]
    }
)

================================================ FILE: yolo training commands.txt ================================================
yolo task=segment mode=train model=yolov8n-seg.pt epochs=1 imgsz=320 workers=2 batch=4 data=datasets\thermalseg.v6i.yolov8\data.yaml